[med-svn] [r-bioc-deseq2] 01/06: New upstream version 1.16.1

Andreas Tille tille at debian.org
Mon Oct 2 20:31:21 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-bioc-deseq2.

commit e4d563bb5d1eb970c6b7287a7305e7e91c9b75bd
Author: Andreas Tille <tille at debian.org>
Date:   Mon Oct 2 22:00:34 2017 +0200

    New upstream version 1.16.1
---
 DESCRIPTION                               |   11 +-
 NAMESPACE                                 |    2 +
 NEWS                                      |   71 +-
 R/RcppExports.R                           |   14 +-
 R/core.R                                  |  863 +++-------
 R/fitNbinomGLMs.R                         |  375 +++++
 R/helper.R                                |  178 ++-
 R/methods.R                               |   38 +-
 R/plots.R                                 |   63 +-
 R/results.R                               |   72 +-
 R/wrappers.R                              |  108 ++
 build/vignette.rds                        |  Bin 231 -> 219 bytes
 inst/CITATION                             |    8 +-
 inst/doc/DESeq2.R                         |  389 +++--
 inst/doc/DESeq2.Rmd                       | 2420 +++++++++++++++++++++++++++++
 inst/doc/DESeq2.Rnw                       | 2414 ----------------------------
 inst/doc/DESeq2.html                      | 1756 +++++++++++++++++++++
 inst/doc/DESeq2.pdf                       |  Bin 672224 -> 0 bytes
 man/DESeq.Rd                              |   16 +-
 man/DESeq2-package.Rd                     |    7 +-
 man/DESeqDataSet.Rd                       |    4 +-
 man/DESeqResults.Rd                       |    2 +-
 man/DESeqTransform.Rd                     |    2 +-
 man/coef.Rd                               |    1 -
 man/collapseReplicates.Rd                 |    1 -
 man/counts.Rd                             |    8 +-
 man/design.Rd                             |    2 +-
 man/dispersionFunction.Rd                 |    5 +-
 man/dispersions.Rd                        |   11 +-
 man/estimateBetaPriorVar.Rd               |    2 +-
 man/estimateDispersions.Rd                |    1 -
 man/estimateDispersionsGeneEst.Rd         |    6 +-
 man/estimateSizeFactors.Rd                |   27 +-
 man/estimateSizeFactorsForMatrix.Rd       |    7 +-
 man/fpkm.Rd                               |    1 -
 man/fpm.Rd                                |    1 -
 man/lfcShrink.Rd                          |   43 +
 man/makeExampleDESeqDataSet.Rd            |    1 -
 man/nbinomLRT.Rd                          |   17 +-
 man/nbinomWaldTest.Rd                     |   53 +-
 man/normTransform.Rd                      |    1 -
 man/normalizationFactors.Rd               |    5 +-
 man/normalizeGeneLength.Rd                |    1 -
 man/plotCounts.Rd                         |   15 +-
 man/plotDispEsts.Rd                       |   13 +-
 man/plotMA.Rd                             |   10 +-
 man/plotPCA.Rd                            |    1 -
 man/plotSparsity.Rd                       |    1 -
 man/replaceOutliers.Rd                    |    2 +-
 man/results.Rd                            |   14 +-
 man/rlog.Rd                               |    2 +-
 man/show.Rd                               |    1 -
 man/sizeFactors.Rd                        |    8 +-
 man/summary.Rd                            |    6 +-
 man/unmix.Rd                              |   43 +
 man/varianceStabilizingTransformation.Rd  |   10 +-
 man/vst.Rd                                |    1 -
 src/DESeq2.cpp                            |  103 +-
 src/RcppExports.cpp                       |   48 +-
 tests/testthat/test_1vs1.R                |    9 +-
 tests/testthat/test_DESeq.R               |   37 +-
 tests/testthat/test_LRT.R                 |   20 +-
 tests/testthat/test_LRT_prior.R           |   13 -
 tests/testthat/test_QR.R                  |   18 +-
 tests/testthat/test_addMLE.R              |   32 +-
 tests/testthat/test_betaFitting.R         |   81 +-
 tests/testthat/test_collapse.R            |   15 +-
 tests/testthat/test_construction_errors.R |   69 +-
 tests/testthat/test_counts_input.R        |   27 +-
 tests/testthat/test_custom_filt.R         |   47 +-
 tests/testthat/test_disp_fit.R            |  211 ++-
 tests/testthat/test_dispersions.R         |   51 +-
 tests/testthat/test_edge_case.R           |   95 +-
 tests/testthat/test_factors.R             |   19 +-
 tests/testthat/test_fpkm.R                |   13 +-
 tests/testthat/test_frozen_transform.R    |   49 +-
 tests/testthat/test_htseq.R               |   17 +-
 tests/testthat/test_interactions.R        |   18 +-
 tests/testthat/test_lfcShrink.R           |   16 +
 tests/testthat/test_linear_mu.R           |   43 +-
 tests/testthat/test_methods.R             |   19 +-
 tests/testthat/test_model_matrix.R        |   53 +-
 tests/testthat/test_nbinomWald.R          |   45 +-
 tests/testthat/test_optim.R               |   76 +-
 tests/testthat/test_outlier.R             |  132 +-
 tests/testthat/test_parallel.R            |  113 +-
 tests/testthat/test_plots.R               |   53 +-
 tests/testthat/test_results.R             |  310 ++--
 tests/testthat/test_rlog.R                |   45 +-
 tests/testthat/test_size_factor.R         |   66 +-
 tests/testthat/test_tximport.R            |   44 +-
 tests/testthat/test_unmix_samples.R       |   51 +
 tests/testthat/test_vst.R                 |   57 +-
 tests/testthat/test_weights.R             |   91 ++
 tests/testthat/test_zero_zero.R           |   18 -
 vignettes/DESeq2.Rmd                      | 2420 +++++++++++++++++++++++++++++
 vignettes/DESeq2.Rnw                      | 2414 ----------------------------
 vignettes/library.bib                     |   22 +-
 vignettes/sed_call                        |    1 +
 99 files changed, 9278 insertions(+), 6937 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index fb3ae36..19a566f 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -2,7 +2,7 @@ Package: DESeq2
 Type: Package
 Title: Differential gene expression analysis based on the negative
         binomial distribution
-Version: 1.14.1
+Version: 1.16.1
 Author: Michael Love, Simon Anders, Wolfgang Huber
 Maintainer: Michael Love <michaelisaiahlove at gmail.com>
 Description: Estimate variance-mean dependence in count data from
@@ -15,11 +15,12 @@ Imports: BiocGenerics (>= 0.7.5), Biobase, BiocParallel, genefilter,
         methods, locfit, geneplotter, ggplot2, Hmisc, Rcpp (>= 0.11.0)
 Depends: S4Vectors (>= 0.9.25), IRanges, GenomicRanges,
         SummarizedExperiment (>= 1.1.6)
-Suggests: testthat, knitr, BiocStyle, vsn, pheatmap, RColorBrewer,
-        airway, IHW, tximport, tximportData, readr, pasilla (>= 0.2.10)
+Suggests: testthat, knitr, BiocStyle, vsn, pheatmap, RColorBrewer, IHW,
+        tximport, tximportData, readr, pbapply, airway, pasilla (>=
+        0.2.10)
 LinkingTo: Rcpp, RcppArmadillo
 biocViews: Sequencing, ChIPSeq, RNASeq, SAGE, DifferentialExpression,
         GeneExpression, Transcription
-RoxygenNote: 5.0.1
+RoxygenNote: 6.0.1
 NeedsCompilation: yes
-Packaged: 2016-12-01 00:14:26 UTC; biocbuild
+Packaged: 2017-05-05 23:25:16 UTC; biocbuild
diff --git a/NAMESPACE b/NAMESPACE
index 81f2bcf..59ee26a 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -24,6 +24,7 @@ export(estimateSizeFactorsForMatrix)
 export(fpkm)
 export(fpm)
 export(getVarianceStabilizedData)
+export(lfcShrink)
 export(makeExampleDESeqDataSet)
 export(nbinomLRT)
 export(nbinomWaldTest)
@@ -40,6 +41,7 @@ export(resultsNames)
 export(rlog)
 export(rlogTransformation)
 export(summary.DESeqResults)
+export(unmix)
 export(varianceStabilizingTransformation)
 export(vst)
 exportClasses(DESeqDataSet)
diff --git a/NEWS b/NEWS
index 09b5455..18be64e 100644
--- a/NEWS
+++ b/NEWS
@@ -1,7 +1,74 @@
-CHANGES IN VERSION 1.14.1
+CHANGES IN VERSION 1.16.0
+-------------------------
+
+    o For DESeq() and nbinomWaldTest(), the default setting
+      will be betaPrior=FALSE, and the recommended pipeline
+      will be to use lfcShrink() for producing shrunken LFCs.
+    o Added a new function unmix(), for unmixing samples
+      according to a linear combination of pure components,
+      e.g. "tissue deconvolution".
+    o Added a new size factor estimator, "poscounts",
+      which evolved out of use cases in Paul McMurdie's
+      phyloseq package.
+    o Ability to specify observation-specific weights,
+      using assays(dds)[["weights"]]. These weights are
+      picked up by dispersion and NB GLM fitting functions.
+
+CHANGES IN VERSION 1.15.40
+--------------------------
+
+    o Added a new function unmix(), for
+      unmixing samples according to pure components,
+      e.g. "tissue deconvolution". The pure components
+      are added on the gene expression scale
+      (either normalized counts or TPMs), and the loss
+      is calculated in a variance stabilized space.
+
+CHANGES IN VERSION 1.15.39
+--------------------------
+
+    o Added a new size factor estimator, "poscounts",
+      which evolved out of use cases in Paul McMurdie's
+      phyloseq package.
+
+CHANGES IN VERSION 1.15.36
+--------------------------
+
+    o Ability to specify observation-specific weights,
+      using assays(dds)[["weights"]]. These weights are
+      picked up by dispersion and NB GLM fitting functions.
+
+CHANGES IN VERSION 1.15.28
+--------------------------
+
+    o Removed some code that would "zero out" LFCs
+      when both groups involved in a contrast had zero counts.
+      This led to inconsistency when similar contrasts
+      were performed by refactoring.
+
+CHANGES IN VERSION 1.15.12
+--------------------------
+
+    o For DESeq() and nbinomWaldTest(), the default setting
+      will be betaPrior=FALSE, and the recommended pipeline
+      will be to use lfcShrink() for producing shrunken
+      log2 fold changes for visualization and ranking.
+      Explanation for the change is presented in the
+      vignette section:
+      "Methods changes since the 2014 DESeq2 paper"
+
+CHANGES IN VERSION 1.15.9
+-------------------------
+
+    o Added a prototype function lfcShrink().
+
+    o Vignette conversion to Rmarkdown / HTML.
+
+CHANGES IN VERSION 1.15.3
 -------------------------
 
-    o Removed deprecated armadillo argument in DESeq2.cpp
+    o Removed the betaPrior option for nbinomLRT, in an effort
+      to clean up and reduce old, unused functionality.
 
 CHANGES IN VERSION 1.13.8
 -------------------------
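
As a reading aid for the entries above, a minimal R sketch of the
recommended 1.16 pipeline; the lfcShrink() call mirrors the example
added to man/DESeq.Rd further down in this commit, while the
type="poscounts" argument name is an assumption based on the updated
man/estimateSizeFactors.Rd (not shown here):

    library(DESeq2)
    dds <- makeExampleDESeqDataSet(betaSD=1)
    dds <- DESeq(dds)       # betaPrior=FALSE is now the default
    res <- results(dds)     # unshrunken (MLE) log2 fold changes
    resultsNames(dds)       # locate the coefficient of interest
    resLFC <- lfcShrink(dds, coef=2, res=res)  # shrunken LFCs for ranking and plots
    # assumed from the updated estimateSizeFactors man page:
    # dds <- estimateSizeFactors(dds, type="poscounts")
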
diff --git a/R/RcppExports.R b/R/RcppExports.R
index 108ec3a..851e9e8 100644
--- a/R/RcppExports.R
+++ b/R/RcppExports.R
@@ -1,15 +1,15 @@
-# This file was generated by Rcpp::compileAttributes
+# Generated by using Rcpp::compileAttributes() -> do not edit by hand
 # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
 
-fitDisp <- function(ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, use_priorSEXP) {
-    .Call('DESeq2_fitDisp', PACKAGE = 'DESeq2', ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, use_priorSEXP)
+fitDisp <- function(ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP) {
+    .Call('DESeq2_fitDisp', PACKAGE = 'DESeq2', ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP)
 }
 
-fitBeta <- function(ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, tolSEXP, maxitSEXP, useQRSEXP) {
-    .Call('DESeq2_fitBeta', PACKAGE = 'DESeq2', ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, tolSEXP, maxitSEXP, useQRSEXP)
+fitBeta <- function(ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, weightsSEXP, useWeightsSEXP, tolSEXP, maxitSEXP, useQRSEXP) {
+    .Call('DESeq2_fitBeta', PACKAGE = 'DESeq2', ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, weightsSEXP, useWeightsSEXP, tolSEXP, maxitSEXP, useQRSEXP)
 }
 
-fitDispGrid <- function(ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, use_priorSEXP) {
-    .Call('DESeq2_fitDispGrid', PACKAGE = 'DESeq2', ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, use_priorSEXP)
+fitDispGrid <- function(ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP) {
+    .Call('DESeq2_fitDispGrid', PACKAGE = 'DESeq2', ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP)
 }
 
diff --git a/R/core.R b/R/core.R
index 1c249f3..96bbdfb 100644
--- a/R/core.R
+++ b/R/core.R
@@ -2,17 +2,20 @@
 #
 # DESeq2 organization of R files
 #
-# core.R ......... most of the statistical code (example call below)
-# methods.R ...... the S4 methods (estimateSizeFactors, etc.)
-# AllClasses.R ... class definitions and object constructors
-# AllGenerics.R .. the generics defined in DESeq2
-# results.R ...... results() function and helpers
-# plots.R ........ all plotting functions
-# helper.R ....... collapseReplicates, fpkm, fpm, DESeqParallel
-# expanded.R ..... helpers for dealing with expanded model matrices
-# RcppExports.R .. the R wrappers for the C++ functions
-# rlogTransformation.R
-# varianceStabilizingTransformation.R
+# core ........... most of the statistical code (example call below)
+# fitNbinomGLMs .. three functions for fitting NB GLMs
+# methods ........ the S4 methods (estimateSizeFactors, etc.)
+# AllClasses ..... class definitions and object constructors
+# AllGenerics .... the generics defined in DESeq2
+# results ........ results() function and helpers
+# plots .......... all plotting functions
+# helper ......... lfcShrink, collapseReplicates, fpkm, fpm, DESeqParallel
+# expanded ....... helpers for dealing with expanded model matrices
+# wrappers ....... the R wrappers for the C++ functions (hand-written)
+# RcppExports .... the R wrappers for the C++ functions (auto-generated)
+#
+# rlogTransformation ... rlog
+# varianceStabilizingTransformation ... VST
 #
 # general outline of the internal function calls.
 # note: not all of these functions are exported.
@@ -159,8 +162,9 @@ NULL
 #' @param betaPrior whether or not to put a zero-mean normal prior on
 #' the non-intercept coefficients.
 #' See \code{\link{nbinomWaldTest}} for a description of the calculation
-#' of the beta prior. By default, the beta prior is used only for the
-#' Wald test, but can also be specified for the likelihood ratio test.
+#' of the beta prior. In versions \code{>=1.16}, the default is set
+#' to \code{FALSE}, and shrunken LFCs are obtained afterwards using
+#' \code{\link{lfcShrink}}.
 #' @param full for \code{test="LRT"}, the full model formula,
 #' which is restricted to the formula in \code{design(object)}.
 #' alternatively, it can be a model matrix constructed by the user.
@@ -228,6 +232,10 @@ NULL
 #' dds <- DESeq(dds)
 #' res <- results(dds)
 #'
+#' # moderated log2 fold changes
+#' resultsNames(dds)
+#' resLFC <- lfcShrink(dds, coef=2, res=res)
+#' 
 #' # an alternate analysis: likelihood ratio test
 #' ddsLRT <- DESeq(dds, test="LRT", reduced= ~ 1)
 #' resLRT <- results(ddsLRT)
@@ -248,14 +256,7 @@ DESeq <- function(object, test=c("Wald","LRT"),
   modelAsFormula <- !is.matrix(full)
   
   if (missing(betaPrior)) {
-    betaPrior <- if (modelAsFormula) {
-      termsOrder <- attr(terms.formula(design(object)),"order")
-      interactionPresent <- any(termsOrder > 1)
-      # use beta prior for Wald tests and when no interaction terms are included
-      (test == "Wald") & !interactionPresent
-    } else {
-      FALSE
-    }
+    betaPrior <- FALSE
   } else {
     stopifnot(is.logical(betaPrior))
   }
@@ -266,8 +267,11 @@ DESeq <- function(object, test=c("Wald","LRT"),
     if (missing(reduced)) {
       stop("likelihood ratio test requires a 'reduced' design, see ?DESeq")
     }
-    if (!missing(modelMatrixType) && modelMatrixType == "expanded") {
-      stop("test='LRT' only implemented for standard model matrices")
+    if (betaPrior) {
+      stop("test='LRT' does not support use of LFC shrinkage, use betaPrior=FALSE")
+    }
+    if (!missing(modelMatrixType) && modelMatrixType=="expanded") {
+      stop("test='LRT' does not support use of expanded model matrix")
     }
     if (is.matrix(full) | is.matrix(reduced)) {
       if (!(is.matrix(full) & is.matrix(reduced))) {
@@ -289,9 +293,12 @@ DESeq <- function(object, test=c("Wald","LRT"),
   }
   
   if (modelAsFormula) {
-
     # run some tests common to DESeq, nbinomWaldTest, nbinomLRT
     designAndArgChecker(object, betaPrior)
+
+    if (design(object) == formula(~1)) {
+      warning("the design is ~ 1 (just an intercept). is this intended?")
+    }
     
     if (full != design(object)) {
       stop("'full' specified as formula should equal design(object)")
@@ -331,8 +338,7 @@ DESeq <- function(object, test=c("Wald","LRT"),
                                modelMatrix=modelMatrix,
                                modelMatrixType=modelMatrixType)
     } else if (test == "LRT") {
-      object <- nbinomLRT(object, full=full, reduced=reduced,
-                          betaPrior=betaPrior, quiet=quiet)
+      object <- nbinomLRT(object, full=full, reduced=reduced, quiet=quiet)
     }
   } else if (parallel) {
     object <- DESeqParallel(object, test=test, fitType=fitType,
@@ -456,11 +462,13 @@ makeExampleDESeqDataSet <- function(n=1000,m=12,betaSD=0,interceptMean=4,interce
 #' estimateSizeFactorsForMatrix(counts(dds),geoMeans=geoMeans)
 #' 
 #' @export
-estimateSizeFactorsForMatrix <- function( counts, locfunc = stats::median, geoMeans, controlGenes )
-{
+estimateSizeFactorsForMatrix <- function(counts, locfunc=stats::median,
+                                         geoMeans, controlGenes) {
   if (missing(geoMeans)) {
+    incomingGeoMeans <- FALSE
     loggeomeans <- rowMeans(log(counts))
   } else {
+    incomingGeoMeans <- TRUE
     if (length(geoMeans) != nrow(counts)) {
       stop("geoMeans should be as long as the number of rows of counts")
     }
@@ -482,6 +490,10 @@ estimateSizeFactorsForMatrix <- function( counts, locfunc = stats::median, geoMe
       exp(locfunc((log(cnts) - loggeomeansSub)[is.finite(loggeomeansSub) & cnts > 0]))
     })
   }
+  if (incomingGeoMeans) {
+    # stabilize size factors to have geometric mean of 1
+    sf <- sf/exp(mean(log(sf)))
+  }
   sf
 }
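
A quick check of the stabilization added above, with arbitrary values:
the division leaves the size factors with a geometric mean of exactly 1.

    sf <- c(0.5, 2, 4)
    sf <- sf / exp(mean(log(sf)))
    exp(mean(log(sf)))   # 1
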
 
@@ -612,12 +624,22 @@ estimateDispersionsGeneEst <- function(object, minDisp=1e-8, kappa_0=1,
 
   stopifnot(length(niter) == 1 & niter > 0)
 
+  # use weights if they are present in assays(object)
+  # (we need this already to decide about linear mu fitting)
+  wlist <- getAndCheckWeights(object, modelMatrix)
+  weights <- wlist$weights
+  useWeights <- wlist$useWeights
+  
   # use a linear model to estimate the expected counts
   # if the number of groups according to the model matrix
   # is equal to the number of columns
   if (is.null(linearMu)) {
     modelMatrixGroups <- modelMatrixGroups(modelMatrix)
     linearMu <- nlevels(modelMatrixGroups) == ncol(modelMatrix)
+    # also check for weights (then can't do linear mu)
+    if (useWeights) {
+      linearMu <- FALSE
+    }
   }
   
   # below, iterate between mean and dispersion estimation (niter) times
@@ -640,9 +662,11 @@ estimateDispersionsGeneEst <- function(object, minDisp=1e-8, kappa_0=1,
     }
     fitMu[fitMu < minmu] <- minmu
     mu[fitidx,] <- fitMu
+
     # use of kappa_0 in backtracking search
     # initial proposal = log(alpha) + kappa_0 * deriv. of log lik. w.r.t. log(alpha)
     # use log(minDisp/10) to stop if dispersions going to -infinity
+
     dispRes <- fitDispWrapper(ySEXP = counts(objectNZ)[fitidx,,drop=FALSE],
                               xSEXP = modelMatrix,
                               mu_hatSEXP = fitMu,
@@ -650,7 +674,9 @@ estimateDispersionsGeneEst <- function(object, minDisp=1e-8, kappa_0=1,
                               log_alpha_prior_meanSEXP = log(alpha_hat)[fitidx],
                               log_alpha_prior_sigmasqSEXP = 1, min_log_alphaSEXP = log(minDisp/10),
                               kappa_0SEXP = kappa_0, tolSEXP = dispTol,
-                              maxitSEXP = maxit, use_priorSEXP = FALSE)
+                              maxitSEXP = maxit, usePriorSEXP = FALSE,
+                              weightsSEXP = weights, useWeightsSEXP = useWeights)
+    
     dispIter[fitidx] <- dispRes$iter
     alpha_hat_new[fitidx] <- pmin(exp(dispRes$log_alpha), maxDisp)
     # only rerun those rows which moved
@@ -669,15 +695,16 @@ estimateDispersionsGeneEst <- function(object, minDisp=1e-8, kappa_0=1,
   }
   dispGeneEstConv <- dispIter < maxit
  
-  # when lacking convergence from the C++ routine
-  # we use an R function to estimate dispersions
-  # by evaluating a grid of posterior evaluations
+  # if lacking convergence from fitDisp() (C++)...
   refitDisp <- !dispGeneEstConv & dispGeneEst > minDisp*10
   if (sum(refitDisp) > 0) {
-    dispGrid <- fitDispGridWrapper(y=counts(objectNZ)[refitDisp,,drop=FALSE],x=modelMatrix,
-                                   mu=mu[refitDisp,,drop=FALSE],
-                                   logAlphaPriorMean=rep(0,sum(refitDisp)),
-                                   logAlphaPriorSigmaSq=1,usePrior=FALSE)
+
+    dispGrid <- fitDispGridWrapper(y = counts(objectNZ)[refitDisp,,drop=FALSE],
+                                   x = modelMatrix,
+                                   mu = mu[refitDisp,,drop=FALSE],
+                                   logAlphaPriorMean = rep(0,sum(refitDisp)),
+                                   logAlphaPriorSigmaSq = 1, usePrior = FALSE,
+                                   weightsSEXP = weights, useWeightsSEXP = useWeights)
     dispGeneEst[refitDisp] <- dispGrid
   }
   dispGeneEst <- pmin(pmax(dispGeneEst, minDisp), maxDisp)
@@ -821,6 +848,11 @@ estimateDispersionsMAP <- function(object, outlierSD=2, dispPriorVar,
 
   # if any missing values, fill in the fitted value to initialize
   dispInit[is.na(dispInit)] <- mcols(objectNZ)$dispFit[is.na(dispInit)]
+
+  # use weights if they are present in assays(object)
+  wlist <- getAndCheckWeights(object, modelMatrix)
+  weights <- wlist$weights
+  useWeights <- wlist$useWeights
   
   # run with prior
   dispResMAP <- fitDispWrapper(ySEXP = counts(objectNZ), xSEXP = modelMatrix, mu_hatSEXP = mu,
@@ -829,24 +861,25 @@ estimateDispersionsMAP <- function(object, outlierSD=2, dispPriorVar,
                                log_alpha_prior_sigmasqSEXP = log_alpha_prior_sigmasq,
                                min_log_alphaSEXP = log(minDisp/10),
                                kappa_0SEXP = kappa_0, tolSEXP = dispTol,
-                               maxitSEXP = maxit, use_priorSEXP = TRUE)
+                               maxitSEXP = maxit, usePriorSEXP = TRUE,
+                               weightsSEXP = weights, useWeightsSEXP = useWeights)
 
   # prepare dispersions for storage in mcols(object)
   dispMAP <- exp(dispResMAP$log_alpha) 
 
-  # when lacking convergence from the C++ routine
-  # we use an R function to estimate dispersions.
-  # This finds the maximum of a smooth curve along a
-  # grid of posterior evaluations
+  # when lacking convergence from fitDisp() (C++)
+  # we use a function to maximize dispersion parameter
+  # along an adaptive grid (also C++)
   dispConv <- dispResMAP$iter < maxit
   refitDisp <- !dispConv
   if (sum(refitDisp) > 0) {
-    dispInR <- fitDispGridWrapper(y = counts(objectNZ)[refitDisp,,drop=FALSE], x = modelMatrix,
+    dispGrid <- fitDispGridWrapper(y = counts(objectNZ)[refitDisp,,drop=FALSE], x = modelMatrix,
                                   mu = mu[refitDisp,,drop=FALSE],
                                   logAlphaPriorMean = log(mcols(objectNZ)$dispFit)[refitDisp],
                                   logAlphaPriorSigmaSq = log_alpha_prior_sigmasq,
-                                  usePrior=TRUE)
-    dispMAP[refitDisp] <- dispInR
+                                  usePrior=TRUE,
+                                  weightsSEXP = weights, useWeightsSEXP = useWeights)
+    dispMAP[refitDisp] <- dispGrid
   }
 
   # bound the dispersion estimate between minDisp and maxDisp for numeric stability
@@ -971,18 +1004,37 @@ estimateDispersionsPriorVar <- function(object, minDisp=1e-8, modelMatrix=NULL)
 #' 
 #' The fitting proceeds as follows: standard maximum likelihood estimates
 #' for GLM coefficients (synonymous with "beta", "log2 fold change", "effect size")
-#' are calculated. A zero-centered Normal prior distribution 
-#' is assumed for the coefficients other than the intercept.
+#' are calculated.
+#' Then, optionally, a zero-centered Normal prior distribution 
+#' (\code{betaPrior}) is assumed for the coefficients other than the intercept.
+#'
+#' Note that this posterior log2 fold change
+#' estimation is no longer the default setting for \code{nbinomWaldTest},
+#' as the standard workflow for coefficient shrinkage has moved to
+#' an additional function, \code{\link{lfcShrink}}.
+#'
+#' For calculating Wald test p-values, the coefficients are scaled by their
+#' standard errors and then compared to a standard Normal distribution. 
+#' The \code{\link{results}}
+#' function without any arguments will automatically perform a contrast of the
+#' last level of the last variable in the design formula over the first level.
+#' The \code{contrast} argument of the \code{\link{results}} function can be used
+#' to generate other comparisons.
+#'  
+#' The Wald test can be replaced with the \code{\link{nbinomLRT}}
+#' for an alternative test of significance.
+#' 
+#' Notes on the log2 fold change prior:
+#' 
 #' The variance of the prior distribution for each
 #' non-intercept coefficient is calculated using the observed
 #' distribution of the maximum likelihood coefficients.  
 #' The final coefficients are then maximum a posteriori estimates
-#' using this prior (Tikhonov/ridge regularization). See below for details on the
+#' using this prior (Tikhonov/ridge regularization). 
+#' See below for details on the
 #' prior variance and the Methods section of the DESeq2 manuscript for more detail.
 #' The use of a prior has little effect on genes with high counts and helps to
 #' moderate the large spread in coefficients for genes with low counts.
-#' For calculating Wald test p-values, the coefficients are scaled by their
-#' standard errors and then compared to a standard Normal distribution. 
 #'
 #' The prior variance is calculated by matching the 0.05 upper quantile
 #' of the observed MLE coefficients to a zero-centered Normal distribution.
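
A standalone sketch of the Wald test computation described above, with
hypothetical values for one coefficient and its standard error:

    beta <- 1.3    # hypothetical log2 fold change
    se <- 0.4      # hypothetical standard error
    stat <- beta / se                                 # Wald statistic
    pvalue <- 2 * pnorm(abs(stat), lower.tail=FALSE)  # two-sided p-value
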
@@ -997,11 +1049,7 @@ estimateDispersionsPriorVar <- function(object, minDisp=1e-8, modelMatrix=NULL)
 #' See \code{\link{estimateBetaPriorVar}}.
 #' The final prior variance for a factor level is the average of the
 #' estimated prior variance over all contrasts of all levels of the factor. 
-#' Another change since the 2014 paper: when interaction terms are present
-#' in the design, the prior on log fold changes is turned off
-#' (for more details, see the vignette section, "Methods changes since
-#' the 2014 DESeq2 paper").
-#' 
+#'
 #' When a log2 fold change prior is used (betaPrior=TRUE),
 #' then \code{nbinomWaldTest} will by default use expanded model matrices,
 #' as described in the \code{modelMatrixType} argument, unless this argument
@@ -1009,14 +1057,7 @@ estimateDispersionsPriorVar <- function(object, minDisp=1e-8, modelMatrix=NULL)
 #' This ensures that log2 fold changes will be independent of the choice
 #' of reference level. In this case, the beta prior variance for each factor
 #' is calculated as the average of the mean squared maximum likelihood
-#' estimates for each level and every possible contrast. The \code{\link{results}}
-#' function without any arguments will automatically perform a contrast of the
-#' last level of the last variable in the design formula over the first level.
-#' The \code{contrast} argument of the \code{\link{results}} function can be used
-#' to generate other comparisons.
-#'  
-#' The Wald test can be replaced with the \code{\link{nbinomLRT}}
-#' for an alternative test of significance.
+#' estimates for each level and every possible contrast. 
 #'
 #' @param object a DESeqDataSet
 #' @param betaPrior whether or not to put a zero-mean normal prior on
@@ -1034,6 +1075,7 @@ estimateDispersionsPriorVar <- function(object, minDisp=1e-8, modelMatrix=NULL)
 #' level of factors in addition to an intercept.
 #' betaPrior must be set to TRUE in order for expanded model matrices
 #' to be fit.
+#' @param betaTol control parameter defining convergence of the coefficient fitting
 #' @param maxit the maximum number of iterations to allow for convergence of the
 #' coefficient vector
 #' @param useOptim whether to use the native optim function on rows which do not
@@ -1061,9 +1103,10 @@ estimateDispersionsPriorVar <- function(object, minDisp=1e-8, modelMatrix=NULL)
 #' res <- results(dds)
 #'
 #' @export
-nbinomWaldTest <- function(object, betaPrior, betaPriorVar,
+nbinomWaldTest <- function(object,
+                           betaPrior=FALSE, betaPriorVar,
                            modelMatrix=NULL, modelMatrixType,
-                           maxit=100, useOptim=TRUE, quiet=FALSE,
+                           betaTol=1e-8, maxit=100, useOptim=TRUE, quiet=FALSE,
                            useT=FALSE, df, useQR=TRUE) {
   if (is.null(dispersions(object))) {
     stop("testing requires dispersion estimates, first call estimateDispersions()")
@@ -1083,12 +1126,13 @@ nbinomWaldTest <- function(object, betaPrior, betaPriorVar,
   # only continue on the rows with non-zero row mean
   objectNZ <- object[!mcols(object)$allZero,,drop=FALSE]
 
+  # model matrix not provided...
   if (is.null(modelMatrix)) {
     modelAsFormula <- TRUE
     termsOrder <- attr(terms.formula(design(object)),"order")
     interactionPresent <- any(termsOrder > 1)
     if (missing(betaPrior)) {
-      betaPrior <- !interactionPresent
+      betaPrior <- FALSE
     }
 
     # run some tests common to DESeq, nbinomWaldTest, nbinomLRT
@@ -1115,6 +1159,7 @@ nbinomWaldTest <- function(object, betaPrior, betaPriorVar,
     hasIntercept <- attr(terms(design(object)),"intercept") == 1
     renameCols <- hasIntercept
   } else {
+    # model matrix was provided...
     if (missing(betaPrior)) {
       betaPrior <- FALSE
     } else {
@@ -1129,19 +1174,24 @@ nbinomWaldTest <- function(object, betaPrior, betaPriorVar,
   if (!betaPrior) {
     # fit the negative binomial GLM without a prior
     # (in actuality a very wide prior with standard deviation 1e3 on log2 fold changes)
-    fit <- fitNbinomGLMs(objectNZ, maxit=maxit, useOptim=useOptim, useQR=useQR,
+    fit <- fitNbinomGLMs(objectNZ,
+                         betaTol=betaTol, maxit=maxit,
+                         useOptim=useOptim, useQR=useQR,
                          renameCols=renameCols, modelMatrix=modelMatrix)
     H <- fit$hat_diagonals
+    mu <- fit$mu
     modelMatrix <- fit$modelMatrix
     modelMatrixNames <- fit$modelMatrixNames
     # record the wide prior variance which was used in fitting
     betaPriorVar <- rep(1e6, ncol(fit$modelMatrix))
   } else {
     priorFitList <- fitGLMsWithPrior(object=object,
-                                     maxit=maxit, useOptim=useOptim, useQR=useQR,
+                                     betaTol=betaTol, maxit=maxit,
+                                     useOptim=useOptim, useQR=useQR,
                                      betaPriorVar=betaPriorVar)
     fit <- priorFitList$fit
     H <- priorFitList$H
+    mu <- priorFitList$mu
     betaPriorVar <- priorFitList$betaPriorVar
     modelMatrix <- priorFitList$modelMatrix
     mleBetaMatrix <- priorFitList$mleBetaMatrix
@@ -1152,9 +1202,9 @@ nbinomWaldTest <- function(object, betaPrior, betaPriorVar,
   }
 
   # store mu in case the user did not call estimateDispersionsGeneEst
-  dimnames(fit$mu) <- NULL
-  assays(objectNZ)[["mu"]] <- fit$mu
-  assays(object)[["mu"]] <- buildMatrixWithNARows(fit$mu, mcols(object)$allZero)
+  dimnames(mu) <- NULL
+  assays(objectNZ)[["mu"]] <- mu
+  assays(object)[["mu"]] <- buildMatrixWithNARows(mu, mcols(object)$allZero)
 
   # store the prior variance directly as an attribute
   # of the DESeqDataSet object, so it can be pulled later by
@@ -1345,25 +1395,6 @@ estimateBetaPriorVar <- function(object,
     (betaMatrix)^2
   }
   names(betaPriorVar) <- colnames(betaMatrix)
-
-  # pre-v1.10 code for interactions and beta prior:
-  # ------------------------------------------------------
-  # find the names of betaPriorVar which correspond
-  # to non-interaction terms and set these to a wide prior
-  ## termsOrder <- attr(terms.formula(design(object)),"order")
-  ## interactionPresent <- any(termsOrder > 1)  
-  ## if (interactionPresent) {
-  ##   nonInteractionCols <- getNonInteractionColumnIndices(objectNZ, modelMatrix)
-  ##   if (modelMatrixType == "standard") widePrior <- 1e6 else widePrior <- 1e3
-  ##   betaPriorVar[nonInteractionCols] <- widePrior
-  ##   if (modelMatrixType == "expanded") {
-  ##     # also set a wide prior for additional contrasts which were added
-  ##     # for calculation of the prior variance in the case of
-  ##     # expanded model matrices
-  ##     designFactors <- getDesignFactors(objectNZ)
-  ##     betaPriorVar[which(names(betaPriorVar) %in% paste0(designFactors,"Cntrst"))] <- widePrior
-  ##   }
-  ## }
   
   # intercept set to wide prior
   if ("Intercept" %in% names(betaPriorVar)) {
@@ -1426,9 +1457,6 @@ estimateMLEForBetaPriorVar <- function(object, maxit=100, useOptim=TRUE, useQR=T
   object
 }
 
-
-
-
 #' Likelihood ratio test (chi-squared test) for GLMs
 #'
 #' This function tests for significance of change in deviance between a
@@ -1440,7 +1468,7 @@ estimateMLEForBetaPriorVar <- function(object, maxit=100, useOptim=TRUE, useQR=T
 #' with df = (reduced residual degrees of freedom - full residual degrees of freedom).
 #' This function is comparable to the \code{nbinomGLMTest} of the previous version of DESeq
 #' and an alternative to the default \code{\link{nbinomWaldTest}}.
-#' 
+#'
 #' @param object a DESeqDataSet
 #' @param full the full model formula, this should be the formula in
 #' \code{design(object)}.
@@ -1448,16 +1476,7 @@ estimateMLEForBetaPriorVar <- function(object, maxit=100, useOptim=TRUE, useQR=T
 #' @param reduced a reduced formula to compare against, e.g.
 #' the full model with a term or terms of interest removed.
 #' alternatively, can be a matrix
-#' @param betaPrior whether or not to put a zero-mean normal prior on
-#' the non-intercept coefficients 
-#' While the beta prior is used typically, for the Wald test, it can
-#' also be specified for the likelihood ratio test. For more details
-#' on the calculation, see \code{\link{nbinomWaldTest}}.
-#' @param betaPriorVar a vector with length equal to the number of
-#' model terms including the intercept.
-#  betaPriorVar gives the variance of the prior on the sample betas,
-#' which if missing is estimated from the rows which do not have any
-#' zeros
+#' @param betaTol control parameter defining convergence of the coefficient fitting
 #' @param maxit the maximum number of iterations to allow for convergence of the
 #' coefficient vector
 #' @param useOptim whether to use the native optim function on rows which do not
@@ -1482,8 +1501,7 @@ estimateMLEForBetaPriorVar <- function(object, maxit=100, useOptim=TRUE, useQR=T
 #'
 #' @export
 nbinomLRT <- function(object, full=design(object), reduced,
-                      betaPrior=FALSE, betaPriorVar,
-                      maxit=100, useOptim=TRUE, quiet=FALSE,
+                      betaTol=1e-8, maxit=100, useOptim=TRUE, quiet=FALSE,
                       useQR=TRUE) {
 
   if (is.null(dispersions(object))) {
@@ -1502,7 +1520,7 @@ nbinomLRT <- function(object, full=design(object), reduced,
     checkLRT(full, reduced)
 
     # run some tests common to DESeq, nbinomWaldTest, nbinomLRT
-    designAndArgChecker(object, betaPrior)
+    designAndArgChecker(object, betaPrior=FALSE)
     
     # try to form model matrices, test for difference
     # in residual degrees of freedom
@@ -1512,9 +1530,6 @@ nbinomLRT <- function(object, full=design(object), reduced,
                             data=as.data.frame(colData(object)))
     df <- ncol(fullModelMatrix) - ncol(reducedModelMatrix)
   } else {
-    if (betaPrior) {
-      stop("user-supplied model matrices require betaPrior=FALSE")
-    }
     message("using supplied model matrix")
     df <- ncol(full) - ncol(reduced)
   }
@@ -1530,7 +1545,6 @@ nbinomLRT <- function(object, full=design(object), reduced,
     object <- getBaseMeansAndVariances(object)
   }
   
-  stopifnot(is.logical(betaPrior))
   if (modelAsFormula) {
     modelMatrixType <- "standard"
     # check for intercept
@@ -1541,57 +1555,39 @@ nbinomLRT <- function(object, full=design(object), reduced,
     renameCols <- FALSE
   }
 
-  # store modelMatrixType so it can be accessed by estimateBetaPriorVar
+  # store modelMatrixType
   attr(object,"modelMatrixType") <- modelMatrixType
 
   # only continue on the rows with non-zero row mean
   objectNZ <- object[!mcols(object)$allZero,,drop=FALSE]
 
-  if (!betaPrior) {
-    if (modelAsFormula) {
-      fullModel <- fitNbinomGLMs(objectNZ, modelFormula=full,
-                                 renameCols=renameCols, maxit=maxit,
-                                 useOptim=useOptim, useQR=useQR, warnNonposVar=FALSE)
-      modelMatrix <- fullModel$modelMatrix
-      reducedModel <- fitNbinomGLMs(objectNZ, modelFormula=reduced, maxit=maxit,
-                                    useOptim=useOptim, useQR=useQR, warnNonposVar=FALSE)
-    } else {
-      fullModel <- fitNbinomGLMs(objectNZ, modelMatrix=full,
-                                 renameCols=FALSE, maxit=maxit,
-                                 useOptim=useOptim, useQR=useQR, warnNonposVar=FALSE)
-      modelMatrix <- full
-      reducedModel <- fitNbinomGLMs(objectNZ, modelMatrix=reduced,
-                                    renameCols=FALSE, maxit=maxit,
-                                    useOptim=useOptim, useQR=useQR, warnNonposVar=FALSE)
-    }
-    betaPriorVar <- rep(1e6, ncol(modelMatrix))
-  } else {
-    priorFull <- fitGLMsWithPrior(object=object,
-                                  maxit=maxit, useOptim=useOptim, useQR=useQR,
-                                  betaPriorVar=betaPriorVar)
-    fullModel <- priorFull$fit
+  if (modelAsFormula) {
+    fullModel <- fitNbinomGLMs(objectNZ, modelFormula=full,
+                               renameCols=renameCols,
+                               betaTol=betaTol, maxit=maxit,
+                               useOptim=useOptim, useQR=useQR,
+                               warnNonposVar=FALSE)
     modelMatrix <- fullModel$modelMatrix
-    betaPriorVar <- priorFull$betaPriorVar
-    mleBetaMatrix <- priorFull$mleBetaMatrix
-    # form a reduced model matrix:
-    # first find the dropped terms
-    # then remove columns from the full model matrix which are
-    # assigned to these terms
-    fullModelTerms <- attr(terms(full),"term.labels")
-    reducedModelTerms <- attr(terms(reduced),"term.labels")
-    droppedTerms <- which(!fullModelTerms %in% reducedModelTerms)
-    fullAssign <- attr(modelMatrix,"assign")
-    idx <- !fullAssign %in% droppedTerms
-    # now subsetting the relevant columns
-    reducedModelMatrix <- modelMatrix[,idx,drop=FALSE]
-    reducedBetaPriorVar <- betaPriorVar[idx]
-    reducedLambda <- 1/reducedBetaPriorVar
-    reducedModel <- fitNbinomGLMs(objectNZ, modelMatrix=reducedModelMatrix,
-                                  lambda=reducedLambda,
-                                  maxit=maxit, useOptim=useOptim, useQR=useQR)
+    reducedModel <- fitNbinomGLMs(objectNZ, modelFormula=reduced,
+                                  betaTol=betaTol, maxit=maxit,
+                                  useOptim=useOptim, useQR=useQR,
+                                  warnNonposVar=FALSE)
+  } else {
+    fullModel <- fitNbinomGLMs(objectNZ, modelMatrix=full,
+                               renameCols=FALSE,
+                               betaTol=betaTol, maxit=maxit,
+                               useOptim=useOptim, useQR=useQR,
+                               warnNonposVar=FALSE)
+    modelMatrix <- full
+    reducedModel <- fitNbinomGLMs(objectNZ, modelMatrix=reduced,
+                                  renameCols=FALSE,
+                                  betaTol=betaTol, maxit=maxit,
+                                  useOptim=useOptim, useQR=useQR,
+                                  warnNonposVar=FALSE)
   }
+  betaPriorVar <- rep(1e6, ncol(modelMatrix))
   
-  attr(object,"betaPrior") <- betaPrior
+  attr(object,"betaPrior") <- FALSE
   attr(object,"betaPriorVar") <- betaPriorVar
   attr(object,"modelMatrix") <- modelMatrix
   attr(object,"reducedModelMatrix") <- reducedModel$modelMatrix
@@ -1623,11 +1619,8 @@ nbinomLRT <- function(object, full=design(object), reduced,
   LRTStatistic <- (2 * (fullModel$logLike - reducedModel$logLike))
   LRTPvalue <- pchisq(LRTStatistic, df=df, lower.tail=FALSE)
 
-  mleBetas <- if (betaPrior) {
-    matrixToList(mleBetaMatrix)
-  } else {
-    NULL
-  }
+  # no need to store additional betas (no beta prior)
+  mleBetas <- NULL
   
   # continue storing LRT results
   resultsList <- c(matrixToList(fullModel$betaMatrix),
@@ -1651,14 +1644,10 @@ nbinomLRT <- function(object, full=design(object), reduced,
 
   modelMatrixNames <- colnames(fullModel$betaMatrix)
   modelMatrixNamesSpaces <- gsub("_"," ",modelMatrixNames)
-  lfcType <- if (attr(object,"betaPrior")) "MAP" else "MLE"
+  lfcType <- "MLE"
   coefInfo <- paste(paste0("log2 fold change (",lfcType,"):"),modelMatrixNamesSpaces)
   seInfo <- paste("standard error:",modelMatrixNamesSpaces)
-  mleInfo <- if (betaPrior) {
-    gsub("_"," ",colnames(mleBetaMatrix))
-  } else {
-    NULL
-  }
+  mleInfo <- NULL
   statInfo <- paste("LRT statistic:",modelComparison)
   pvalInfo <- paste("LRT p-value:",modelComparison)
 
@@ -1863,9 +1852,15 @@ localDispersionFit <- function( means, disps, minDisp ) {
 
 # convenience function for testing the log likelihood
 # for a count matrix, mu matrix and vector disp
-nbinomLogLike <- function(counts, mu, disp) {
-  rowSums(matrix(dnbinom(counts, mu=mu,size=1/disp,
-                         log=TRUE),ncol=ncol(counts)))
+nbinomLogLike <- function(counts, mu, disp, weights, useWeights) {
+  if (is.null(disp)) return(NULL)
+  if (useWeights) {
+    rowSums(weights * matrix(dnbinom(counts,mu=mu,size=1/disp,
+                           log=TRUE),ncol=ncol(counts)))
+  } else {
+    rowSums(matrix(dnbinom(counts,mu=mu,size=1/disp,
+                           log=TRUE),ncol=ncol(counts)))    
+  }
 }
 
 # simple function to return a matrix of size factors
@@ -1879,273 +1874,6 @@ getSizeOrNormFactors <- function(object) {
   }
 }
 
-# Unexported, low-level function for fitting negative binomial GLMs
-#
-# Users typically call \code{\link{nbinomWaldTest}} or \code{\link{nbinomLRT}}
-# which calls this function to perform fitting.  These functions return
-# a \code{\link{DESeqDataSet}} object with the appropriate columns
-# added.  This function returns results as a list.
-#
-# object a DESeqDataSet
-# modelMatrix the design matrix
-# modelFormula a formula specifying how to construct the design matrix
-# alpha_hat the dispersion parameter estimates
-# lambda the 'ridge' term added for the penalized GLM on the log2 scale
-# renameCols whether to give columns variable_B_vs_A style names
-# betaTol control parameter: stop when the following is satisfied:
-#   abs(dev - dev_old)/(abs(dev) + 0.1) < betaTol
-# maxit control parameter: maximum number of iteration to allow for
-#   convergence
-# useOptim whether to use optim on rows which have not converged:
-#   Fisher scoring is not ideal with multiple groups and sparse
-#   count distributions
-# useQR whether to use the QR decomposition on the design matrix X
-# forceOptim whether to use optim on all rows
-# warnNonposVar whether to warn about non positive variances,
-#   for advanced users only running LRT without beta prior,
-#   this might be desirable to be ignored.
-#
-# return a list of results, with coefficients and standard
-# errors on the log2 scale
-fitNbinomGLMs <- function(object, modelMatrix=NULL, modelFormula, alpha_hat, lambda,
-                          renameCols=TRUE, betaTol=1e-8, maxit=100, useOptim=TRUE,
-                          useQR=TRUE, forceOptim=FALSE, warnNonposVar=TRUE) {
-  if (missing(modelFormula)) {
-    modelFormula <- design(object)
-  }
-  if (is.null(modelMatrix)) {
-    modelAsFormula <- TRUE
-    modelMatrix <- stats::model.matrix.default(modelFormula,
-                     data=as.data.frame(colData(object)))
-  } else {
-    modelAsFormula <- FALSE
-  }
-
-  stopifnot(all(colSums(abs(modelMatrix)) > 0))
-
-  # rename columns, for use as columns in DataFrame
-  # and to emphasize the reference level comparison
-  modelMatrixNames <- colnames(modelMatrix)
-  modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
-  modelMatrixNames <- make.names(modelMatrixNames)
-  
-  if (renameCols) {
-    convertNames <- renameModelMatrixColumns(colData(object),
-                                             modelFormula)
-    convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
-    modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
-  }
-  colnames(modelMatrix) <- modelMatrixNames
-  
-  normalizationFactors <- getSizeOrNormFactors(object)
-  
-  if (missing(alpha_hat)) {
-    alpha_hat <- dispersions(object)
-  }
-
-  if (length(alpha_hat) != nrow(object)) {
-    stop("alpha_hat needs to be the same length as nrows(object)")
-  }
-
-  # set a wide prior for all coefficients
-  if (missing(lambda)) {
-    lambda <- rep(1e-6, ncol(modelMatrix))
-  }
-  
-  # bypass the beta fitting if the model formula is only intercept and
-  # the prior variance is large (1e6)
-  # i.e., LRT with reduced ~ 1 and no beta prior
-  justIntercept <- if (modelAsFormula) {
-    modelFormula == formula(~ 1)
-  } else {
-    ncol(modelMatrix) == 1 & all(modelMatrix == 1)
-  }
-  if (justIntercept & all(lambda <= 1e-6)) {
-      alpha <- alpha_hat
-      betaConv <- rep(TRUE, nrow(object))
-      betaIter <- rep(1,nrow(object))
-      betaMatrix <- matrix(log2(mcols(object)$baseMean),ncol=1)
-      mu <- normalizationFactors * as.numeric(2^betaMatrix)
-      logLike <- rowSums(dnbinom(counts(object), mu=mu, size=1/alpha, log=TRUE))
-      deviance <- -2 * logLike
-      modelMatrix <- stats::model.matrix.default(~ 1, as.data.frame(colData(object)))
-      colnames(modelMatrix) <- modelMatrixNames <- "Intercept"
-      w <- (mu^-1 + alpha)^-1
-      xtwx <- rowSums(w)
-      sigma <- xtwx^-1
-      betaSE <- matrix(log2(exp(1)) * sqrt(sigma),ncol=1)      
-      hat_diagonals <- w * xtwx^-1;
-      res <- list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
-                  betaSE = betaSE, mu = mu, betaIter = betaIter,
-                  deviance = deviance,
-                  modelMatrix=modelMatrix, 
-                  nterms=1, hat_diagonals=hat_diagonals)
-      return(res)
-  }
-  
-  qrx <- qr(modelMatrix)
-  # if full rank, estimate initial betas for IRLS below
-  if (qrx$rank == ncol(modelMatrix)) {
-    Q <- qr.Q(qrx)
-    R <- qr.R(qrx)
-    y <- t(log(counts(object,normalized=TRUE) + .1))
-    beta_mat <- t(solve(R, t(Q) %*% y))
-  } else {
-    if ("Intercept" %in% modelMatrixNames) {
-      beta_mat <- matrix(0, ncol=ncol(modelMatrix), nrow=nrow(object))
-      # use the natural log as fitBeta occurs in the natural log scale
-      logBaseMean <- log(rowMeans(counts(object,normalized=TRUE)))
-      beta_mat[,which(modelMatrixNames == "Intercept")] <- logBaseMean
-    } else {
-      beta_mat <- matrix(1, ncol=ncol(modelMatrix), nrow=nrow(object))
-    }
-  }
-  
-  # here we convert from the log2 scale of the betas
-  # and the beta prior variance to the log scale
-  # used in fitBeta.
-  # so we divide by the square of the
-  # conversion factor, log(2)
-  lambdaLogScale <- lambda / log(2)^2
-
-  betaRes <- fitBetaWrapper(ySEXP = counts(object), xSEXP = modelMatrix,
-                            nfSEXP = normalizationFactors,
-                            alpha_hatSEXP = alpha_hat,
-                            beta_matSEXP = beta_mat,
-                            lambdaSEXP = lambdaLogScale,
-                            tolSEXP = betaTol, maxitSEXP = maxit,
-                            useQRSEXP=useQR)
-  mu <- normalizationFactors * t(exp(modelMatrix %*% t(betaRes$beta_mat)))
-  dispersionVector <- rep(dispersions(object), times=ncol(object))
-  logLike <- nbinomLogLike(counts(object), mu, dispersions(object))
-
-  # test for stability
-  rowStable <- apply(betaRes$beta_mat,1,function(row) sum(is.na(row))) == 0
-
-  # test for positive variances
-  rowVarPositive <- apply(betaRes$beta_var_mat,1,function(row) sum(row <= 0)) == 0
-  
-  # test for convergence, stability and positive variances
-  betaConv <- betaRes$iter < maxit
-  
-  # here we transform the betaMatrix and betaSE to a log2 scale
-  betaMatrix <- log2(exp(1))*betaRes$beta_mat
-  colnames(betaMatrix) <- modelMatrixNames
-  colnames(modelMatrix) <- modelMatrixNames
-  # warn below regarding these rows with negative variance
-  betaSE <- log2(exp(1))*sqrt(pmax(betaRes$beta_var_mat,0))
-  colnames(betaSE) <- paste0("SE_",modelMatrixNames)
-
-  # switch based on whether we should also use optim
-  # on rows which did not converge
-  rowsForOptim <- if (useOptim) {
-    which(!betaConv | !rowStable | !rowVarPositive)
-  } else {
-    which(!rowStable | !rowVarPositive)
-  }
-  
-  if (forceOptim) {
-    rowsForOptim <- seq_along(betaConv)
-  }
-  
-  if (length(rowsForOptim) > 0) {
-    # we use optim if didn't reach convergence with the IRLS code
-    resOptim <- fitNbinomGLMsOptim(object,modelMatrix,lambda,
-                                   rowsForOptim,rowStable,
-                                   normalizationFactors,alpha_hat,
-                                   betaMatrix,betaSE,betaConv,
-                                   beta_mat,
-                                   mu,logLike)
-    betaMatrix <- resOptim$betaMatrix
-    betaSE <- resOptim$betaSE
-    betaConv <- resOptim$betaConv
-    mu <- resOptim$mu
-    logLike <- resOptim$logLike
-  }
-
-  stopifnot(!any(is.na(betaSE)))
-  nNonposVar <- sum(rowSums(betaSE == 0) > 0)
-  if (warnNonposVar & nNonposVar > 0) warning(nNonposVar,"rows had non-positive estimates of variance for coefficients")
-  
-  list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
-       betaSE = betaSE, mu = mu, betaIter = betaRes$iter,
-       deviance = betaRes$deviance,
-       modelMatrix=modelMatrix, 
-       nterms=ncol(modelMatrix), hat_diagonals=betaRes$hat_diagonals)
-}
-
-
-# Fit dispersions for negative binomial GLM
-#
-# This function estimates the dispersion parameter (alpha) for negative binomial
-# generalized linear models. The fitting is performed on the log scale.
-#
-# ySEXP n by m matrix of counts
-# xSEXP m by k design matrix
-# mu_hatSEXP n by m matrix, the expected mean values, given beta-hat
-# log_alphaSEXP n length vector of initial guesses for log(alpha)
-# log_alpha_prior_meanSEXP n length vector of the fitted values for log(alpha)
-# log_alpha_prior_sigmasqSEXP a single numeric value for the variance of the prior
-# min_log_alphaSEXP the minimum value of log alpha
-# kappa_0SEXP a parameter used in calculting the initial proposal
-#   for the backtracking search
-#   initial proposal = log(alpha) + kappa_0 * deriv. of log lik. w.r.t. log(alpha)
-# tolSEXP tolerance for convergence in estimates
-# maxitSEXP maximum number of iterations
-# use_priorSEXP boolean variable, whether to use a prior or just calculate the MLE
-#
-# return a list with elements: log_alpha, iter, iter_accept, last_change, initial_lp, intial_dlp, last_lp, last_dlp, last_d2lp
-fitDispWrapper <- function (ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP,
-                            log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP,
-                            tolSEXP, maxitSEXP, use_priorSEXP) {
-  # test for any NAs in arguments
-  arg.names <- names(formals(fitDispWrapper))
-  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
-  if (any(na.test)) stop(paste("in call to fitDisp, the following arguments contain NA:",
-                               paste(arg.names[na.test],collapse=", ")))
-  fitDisp(ySEXP=ySEXP, xSEXP=xSEXP, mu_hatSEXP=mu_hatSEXP,
-          log_alphaSEXP=log_alphaSEXP, log_alpha_prior_meanSEXP=log_alpha_prior_meanSEXP,
-          log_alpha_prior_sigmasqSEXP=log_alpha_prior_sigmasqSEXP,
-          min_log_alphaSEXP=min_log_alphaSEXP, kappa_0SEXP=kappa_0SEXP,
-          tolSEXP=tolSEXP, maxitSEXP=maxitSEXP, use_priorSEXP=use_priorSEXP)
-}
-
-
-# Fit beta coefficients for negative binomial GLM
-#
-# This function estimates the coefficients (betas) for negative binomial generalized linear models.
-#
-# ySEXP n by m matrix of counts
-# xSEXP m by k design matrix
-# nfSEXP n by m matrix of normalization factors
-# alpha_hatSEXP n length vector of the disperion estimates
-# contrastSEXP a k length vector for a possible contrast
-# beta_matSEXP n by k matrix of the initial estimates for the betas
-# lambdaSEXP k length vector of the ridge values
-# tolSEXP tolerance for convergence in estimates
-# maxitSEXP maximum number of iterations
-# useQRSEXP whether to use QR decomposition
-#
-# Note: at this level the betas are on the natural log scale
-fitBetaWrapper <- function (ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP,
-                            beta_matSEXP, lambdaSEXP, tolSEXP, maxitSEXP, useQRSEXP) {
-  if ( missing(contrastSEXP) ) {
-    # contrast is not required, just give 1,0,0,...
-    contrastSEXP <- c(1,rep(0,ncol(xSEXP)-1))
-  }
-  # test for any NAs in arguments
-  arg.names <- names(formals(fitBetaWrapper))
-  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
-  if (any(na.test)) stop(paste("in call to fitBeta, the following arguments contain NA:",
-                               paste(arg.names[na.test],collapse=", ")))
-  
-  fitBeta(ySEXP=ySEXP, xSEXP=xSEXP, nfSEXP=nfSEXP, alpha_hatSEXP=alpha_hatSEXP,
-          contrastSEXP=contrastSEXP, beta_matSEXP=beta_matSEXP,
-          lambdaSEXP=lambdaSEXP, tolSEXP=tolSEXP, maxitSEXP=maxitSEXP,
-          useQRSEXP=useQRSEXP)
-}
-
-
 # convenience function for building results tables
 # out of a list and filling in NA rows
 buildDataFrameWithNARows <- function(resultsList, NArows) {
@@ -2290,11 +2018,6 @@ nOrMoreInCell <- function(modelMatrix, n) {
   numEqual >= n
 }
 
-# returns TRUE or FALSE if removing row would leave matrix full rank
-## leaveOneOutFullRank <- function(modelMatrix) {
-##   sapply(seq_len(nrow(modelMatrix)), function(i) qr(modelMatrix[-i,,drop=FALSE])$rank) == ncol(modelMatrix)
-## }
-
 
 # an unexported diagnostic function
 # to retrieve the covariance matrix
@@ -2343,221 +2066,6 @@ matchWeightedUpperQuantileForVariance <- function(x, weights, upperQuantile=.05)
   unname(sdEst)^2
 }
 
-
-
-
-
-# this function calls fitNbinomGLMs() twice:
-# 1 - without the beta prior, in order to calculate the
-#     beta prior variance and hat matrix
-# 2 - again but with the prior in order to get beta matrix and standard errors
-fitGLMsWithPrior <- function(object, maxit, useOptim, useQR, betaPriorVar) {
-  
-  objectNZ <- object[!mcols(object)$allZero,,drop=FALSE]
-  modelMatrixType <- attr(object, "modelMatrixType")
-  
-  if (missing(betaPriorVar) | !("H" %in% assayNames(objectNZ))) {
-    # first, fit the negative binomial GLM without a prior,
-    # used to construct the prior variances
-    # and for the hat matrix diagonals for calculating Cook's distance
-    fit <- fitNbinomGLMs(objectNZ, maxit=maxit, useOptim=useOptim, useQR=useQR,
-                         renameCols = (modelMatrixType == "standard"))
-    modelMatrix <- fit$modelMatrix
-    modelMatrixNames <- colnames(modelMatrix)
-    H <- fit$hat_diagonal
-    betaMatrix <- fit$betaMatrix
-
-    modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
-    modelMatrixNames <- make.names(modelMatrixNames)
-    colnames(betaMatrix) <- modelMatrixNames
-    
-    # save the MLE log fold changes for addMLE argument of results
-    convertNames <- renameModelMatrixColumns(colData(object),
-                                             design(objectNZ))
-    convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
-    modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
-    mleBetaMatrix <- fit$betaMatrix
-    colnames(mleBetaMatrix) <- paste0("MLE_",modelMatrixNames)
-
-    # store for use in estimateBetaPriorVar below
-    mcols(objectNZ) <- cbind(mcols(objectNZ), DataFrame(mleBetaMatrix))
-  } else {
-    # we can skip the first MLE fit because the
-    # beta prior variance and hat matrix diagonals were provided
-    modelMatrix <- getModelMatrix(object)
-    H <- assays(objectNZ)[["H"]]
-    mleBetaMatrix <- as.matrix(mcols(objectNZ)[,grep("MLE_",names(mcols(objectNZ))),drop=FALSE])
-  }
-     
-  if (missing(betaPriorVar)) {
-    betaPriorVar <- estimateBetaPriorVar(objectNZ)
-  } else {
-    # else we are provided the prior variance:
-    # check if the lambda is the correct length
-    # given the design formula
-    if (modelMatrixType == "expanded") {
-      modelMatrix <- makeExpandedModelMatrix(objectNZ)
-    }
-    p <- ncol(modelMatrix)
-    if (length(betaPriorVar) != p) {
-      stop(paste("betaPriorVar should have length",p,"to match:",paste(colnames(modelMatrix),collapse=", ")))
-    }
-  }
-  
-  # refit the negative binomial GLM with a prior on betas
-  if (any(betaPriorVar == 0)) {
-    stop("beta prior variances are equal to zero for some variables")
-  }
-  lambda <- 1/betaPriorVar
-
-  if (modelMatrixType == "standard") {
-    fit <- fitNbinomGLMs(objectNZ, lambda=lambda, maxit=maxit, useOptim=useOptim,
-                         useQR=useQR)
-    modelMatrix <- fit$modelMatrix
-  } else {
-    modelMatrix <- makeExpandedModelMatrix(objectNZ)
-    fit <- fitNbinomGLMs(objectNZ, lambda=lambda, maxit=maxit, useOptim=useOptim,
-                         useQR=useQR, modelMatrix=modelMatrix, renameCols=FALSE)
-  }
-
-  res <- list(fit=fit, H=H, betaPriorVar=betaPriorVar,
-              modelMatrix=modelMatrix, mleBetaMatrix=mleBetaMatrix)
-  res
-}
-
-
-# breaking out the optim backup code from fitNbinomGLMs
-fitNbinomGLMsOptim <- function(object,modelMatrix,lambda,
-                               rowsForOptim,rowStable,
-                               normalizationFactors,alpha_hat,
-                               betaMatrix,betaSE,betaConv,
-                               beta_mat,
-                               mu,logLike) {
-  scaleCols <- apply(modelMatrix,2,function(z) max(abs(z)))
-  stopifnot(all(scaleCols > 0))
-  x <- sweep(modelMatrix,2,scaleCols,"/")
-  lambdaColScale <- lambda / scaleCols^2
-  lambdaColScale <- ifelse(lambdaColScale == 0, 1e-6, lambdaColScale)
-  lambdaLogScale <- lambda / log(2)^2
-  lambdaLogScaleColScale <- lambdaLogScale / scaleCols^2
-  large <- 30
-  for (row in rowsForOptim) {
-    betaRow <- if (rowStable[row] & all(abs(betaMatrix[row,]) < large)) {
-      betaMatrix[row,] * scaleCols
-    } else {
-      beta_mat[row,] * scaleCols
-    }
-    nf <- normalizationFactors[row,]
-    k <- counts(object)[row,]
-    alpha <- alpha_hat[row]
-    objectiveFn <- function(p) {
-      mu_row <- as.numeric(nf * 2^(x %*% p))
-      logLike <- sum(dnbinom(k,mu=mu_row,size=1/alpha,log=TRUE))
-      logPrior <- sum(dnorm(p,0,sqrt(1/lambdaColScale),log=TRUE))
-      negLogPost <- -1 * (logLike + logPrior)
-      if (is.finite(negLogPost)) negLogPost else 10^300
-    }
-    o <- optim(betaRow, objectiveFn, method="L-BFGS-B",lower=-large, upper=large)
-    ridge <- if (length(lambdaLogScale) > 1) {
-      diag(lambdaLogScaleColScale)
-    } else {
-      as.matrix(lambdaLogScaleColScale,ncol=1)
-    }
-    # if we converged, change betaConv to TRUE
-    if (o$convergence == 0) {
-      betaConv[row] <- TRUE
-    }
-    # with or without convergence, store the estimate from optim
-    betaMatrix[row,] <- o$par / scaleCols
-    # calculate the standard errors
-    mu_row <- as.numeric(nf * 2^(x %*% o$par))
-    minmu <- 0.5
-    mu_row[mu_row < minmu] <- minmu
-    w <- diag((mu_row^-1 + alpha)^-1)
-    xtwx <- t(x) %*% w %*% x
-    xtwxRidgeInv <- solve(xtwx + ridge)
-    sigma <- xtwxRidgeInv %*% xtwx %*% xtwxRidgeInv
-    # warn below regarding these rows with negative variance
-    betaSE[row,] <- log2(exp(1)) * sqrt(pmax(diag(sigma),0)) / scaleCols
-    # store the new mu vector
-    mu[row,] <- mu_row
-    logLike[row] <- sum(dnbinom(k, mu=mu_row, size=1/alpha, log=TRUE))
-  }
-  return(list(betaMatrix=betaMatrix,betaSE=betaSE,
-              betaConv=betaConv,
-              mu=mu,logLike=logLike))
-}
-
-
-### DEPRECATED ###
-# backup function in case dispersion doesn't converge
-# this functionality is now implemented in Cpp below
-fitDispInR <- function(y, x, mu, logAlphaPriorMean,
-                       logAlphaPriorSigmaSq, usePrior) {
-  disp <- numeric(nrow(y))
-  # function to evaluate posterior
-  logPost <- function(logAlpha) {
-    alpha <- exp(logAlpha)
-    w <- diag(1/(1/murow^2 * ( murow + alpha * murow^2 )))
-    logLike <- sum(dnbinom(yrow, mu=murow, size=1/alpha, log=TRUE))
-    coxReid <- -.5*(log(det(t(x) %*% w %*% x)))
-    logPrior <- if (usePrior) {
-      dnorm(logAlpha, logAlphaPriorMeanRow, sqrt(logAlphaPriorSigmaSq), log=TRUE)
-    } else {
-      0
-    } 
-    (logLike + coxReid + logPrior)
-  }
-
-  maxDisp <- max(10, ncol(y))
-  s <- seq(from=log(1e-8),to=log(maxDisp),length=15)
-  delta <- s[2] - s[1]
-  # loop through rows
-  for (i in seq_len(nrow(y))) {
-    murow <- mu[i,]
-    yrow <- y[i,]
-    logAlphaPriorMeanRow <- logAlphaPriorMean[i]
-    lpo <- sapply(s, logPost)
-    sfine <- seq(from=s[which.max(lpo)]-delta, to=s[which.max(lpo)]+delta, length=15)
-    lpofine <- sapply(sfine, logPost)
-    disp[i] <- exp(sfine[which.max(lpofine)])
-  }
-  disp
-}
-
-# Fit dispersions by evaluating over grid
-#
-# This function estimates the dispersion parameter (alpha) for negative binomial
-# generalized linear models. The fitting is performed on the log scale.
-#
-# ySEXP n by m matrix of counts
-# xSEXP m by k design matrix
-# mu_hatSEXP n by m matrix, the expected mean values, given beta-hat
-# disp_gridSEXP the grid over which to estimate
-# log_alpha_prior_meanSEXP n length vector of the fitted values for log(alpha)
-# log_alpha_prior_sigmasqSEXP a single numeric value for the variance of the prior
-# use_priorSEXP boolean variable, whether to use a prior or just calculate the MLE
-#
-# return a list with elements: 
-fitDispGridWrapper <- function(y, x, mu, logAlphaPriorMean,
-                               logAlphaPriorSigmaSq, usePrior) {
-  # test for any NAs in arguments
-  arg.names <- names(formals(fitDispGridWrapper))
-  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
-  if (any(na.test)) stop(paste("in call to fitDispGridWrapper, the following arguments contain NA:",
-                               paste(arg.names[na.test],collapse=", ")))
-  minLogAlpha <- log(1e-8)
-  maxLogAlpha <- log(max(10, ncol(y)))
-  dispGrid <- seq(from=minLogAlpha, to=maxLogAlpha, length=15)
-  logAlpha <- fitDispGrid(ySEXP=y, xSEXP=x, mu_hatSEXP=mu, disp_gridSEXP=dispGrid,
-                          log_alpha_prior_meanSEXP=logAlphaPriorMean,
-                          log_alpha_prior_sigmasqSEXP=logAlphaPriorSigmaSq,
-                          use_priorSEXP=usePrior)$log_alpha
-  exp(logAlpha)
-}
-
-
-
 # rough dispersion estimate using counts and fitted values
 roughDispEstimate <- function(y, x) {
 
@@ -2591,7 +2099,6 @@ modelMatrixGroups <- function(x) {
   factor(unname(apply(x, 1, paste0, collapse="__")))
 }
 
-# fast, rough estimation of means for rough dispersion estimation (above)
 linearModelMu <- function(y, x) {
   qrx <- qr(x)
   Q <- qr.Q(qrx)
@@ -2609,7 +2116,7 @@ linearModelMuNormalized <- function(object, x) {
 }
 
 # checks for LRT formulas, written as function to remove duplicate code
-# in DESeq() and nbinomLRT()
+# in DESeq and nbinomLRT
 checkLRT <- function(full, reduced) {
   reducedNotInFull <- !all.vars(reduced) %in% all.vars(full)
   if (any(reducedNotInFull)) {
@@ -2666,13 +2173,7 @@ refitWithoutOutliers <- function(object, test, betaPrior, full, reduced,
                                   modelMatrix=modelMatrix,
                                   modelMatrixType=modelMatrixType)
     } else if (test == "LRT") {
-      if (!betaPrior) {
-        objectSub <- nbinomLRT(objectSub, full=full, reduced=reduced, quiet=quiet)
-      } else {
-        betaPriorVar <- attr(object, "betaPriorVar")
-        objectSub <- nbinomLRT(objectSub, full=full, reduced=reduced, betaPrior=betaPrior,
-                               betaPriorVar=betaPriorVar, quiet=quiet)
-      }
+      objectSub <- nbinomLRT(objectSub, full=full, reduced=reduced, quiet=quiet)
     }
     
     idx <- match(names(mcols(objectSub)), names(mcols(object)))
@@ -2828,3 +2329,39 @@ and then provide your custom matrix to 'full' argument of DESeq.
 getModelMatrix <- function(object) {
   stats::model.matrix.default(design(object), data=as.data.frame(colData(object)))
 }
+
+getAndCheckWeights <- function(object, modelMatrix) {
+  if ("weights" %in% assayNames(object)) {
+    useWeights <- TRUE
+    weights <- unname(assays(object)[["weights"]])
+    stopifnot(all(weights >= 0))
+    weights <- weights / apply(weights, 1, max)
+    # some code for testing whether still full rank
+    # only performed once per analysis, by setting object attribute
+    if (is.null(attr(object, "weightsOK"))) {
+      m <- ncol(modelMatrix)
+      full.rank <- qr(modelMatrix)$rank == m
+      weights.ok <- logical(nrow(weights))
+      if (full.rank) {
+        for (i in seq_len(nrow(weights))) {
+          weights.ok[i] <- qr(weights[i,] * modelMatrix)$rank == m
+        }
+      } else {
+        # model matrix is not full rank,
+        # e.g. expanded model matrix from betaPrior=TRUE:
+        # just check zero columns
+        weights.ok <- rep(TRUE, nrow(weights))
+        for (j in seq_len(ncol(modelMatrix))) {
+          num.zero <- colSums(t(weights) * modelMatrix[,j] == 0)
+          weights.ok <- weights.ok & (num.zero != nrow(modelMatrix))
+        }
+      }
+      stopifnot(all(weights.ok))
+    }
+    attr(object, "weightsOK") <- TRUE
+  } else {
+    useWeights <- FALSE
+    weights <- matrix(1, nrow=nrow(object), ncol=ncol(object))
+  }
+  list(weights=weights,useWeights=useWeights)
+}
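+
+# A minimal usage sketch (editor's illustration, not part of the API;
+# 'dds' is a hypothetical DESeqDataSet and the weight values are
+# arbitrary). Weights stored under the "weights" assay name are picked
+# up by getAndCheckWeights() and hence enter the GLM fitting:
+#
+#   w <- matrix(runif(nrow(dds) * ncol(dds), 0.5, 1),
+#               nrow=nrow(dds), ncol=ncol(dds))
+#   assays(dds)[["weights"]] <- w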
diff --git a/R/fitNbinomGLMs.R b/R/fitNbinomGLMs.R
new file mode 100644
index 0000000..777b1db
--- /dev/null
+++ b/R/fitNbinomGLMs.R
@@ -0,0 +1,375 @@
+# Unexported, low-level function for fitting negative binomial GLMs
+#
+# Users typically call \code{\link{nbinomWaldTest}} or \code{\link{nbinomLRT}}
+# which calls this function to perform fitting.  These functions return
+# a \code{\link{DESeqDataSet}} object with the appropriate columns
+# added.  This function returns results as a list.
+#
+# object a DESeqDataSet
+# modelMatrix the design matrix
+# modelFormula a formula specifying how to construct the design matrix
+# alpha_hat the dispersion parameter estimates
+# lambda the 'ridge' term added for the penalized GLM on the log2 scale
+# renameCols whether to give columns variable_B_vs_A style names
+# betaTol control parameter: stop when the following is satisfied:
+#   abs(dev - dev_old)/(abs(dev) + 0.1) < betaTol
+# maxit control parameter: maximum number of iteration to allow for
+#   convergence
+# useOptim whether to use optim on rows which have not converged:
+#   Fisher scoring is not ideal with multiple groups and sparse
+#   count distributions
+# useQR whether to use the QR decomposition on the design matrix X
+# forceOptim whether to use optim on all rows
+# warnNonposVar whether to warn about non-positive variances;
+#   advanced users running the LRT without a beta prior
+#   may prefer to ignore this warning.
+#
+# return a list of results, with coefficients and standard
+# errors on the log2 scale
+fitNbinomGLMs <- function(object, modelMatrix=NULL, modelFormula, alpha_hat, lambda,
+                          renameCols=TRUE, betaTol=1e-8, maxit=100, useOptim=TRUE,
+                          useQR=TRUE, forceOptim=FALSE, warnNonposVar=TRUE) {
+  if (missing(modelFormula)) {
+    modelFormula <- design(object)
+  }
+  if (is.null(modelMatrix)) {
+    modelAsFormula <- TRUE
+    modelMatrix <- stats::model.matrix.default(modelFormula,
+                     data=as.data.frame(colData(object)))
+  } else {
+    modelAsFormula <- FALSE
+  }
+
+  stopifnot(all(colSums(abs(modelMatrix)) > 0))
+
+  # rename columns, for use as columns in DataFrame
+  # and to emphasize the reference level comparison
+  modelMatrixNames <- colnames(modelMatrix)
+  modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
+  modelMatrixNames <- make.names(modelMatrixNames)
+  
+  if (renameCols) {
+    convertNames <- renameModelMatrixColumns(colData(object),
+                                             modelFormula)
+    convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
+    modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
+  }
+  colnames(modelMatrix) <- modelMatrixNames
+  
+  normalizationFactors <- getSizeOrNormFactors(object)
+  
+  if (missing(alpha_hat)) {
+    alpha_hat <- dispersions(object)
+  }
+
+  if (length(alpha_hat) != nrow(object)) {
+    stop("alpha_hat needs to be the same length as nrows(object)")
+  }
+
+  # set a wide prior for all coefficients
+  if (missing(lambda)) {
+    lambda <- rep(1e-6, ncol(modelMatrix))
+  }
+
+  # use weights if they are present in assays(object)
+  wlist <- getAndCheckWeights(object, modelMatrix)
+  weights <- wlist$weights
+  useWeights <- wlist$useWeights
+  
+  # bypass the beta fitting if the model formula is only intercept and
+  # the prior variance is large (1e6)
+  # i.e., LRT with reduced ~ 1 and no beta prior
+  justIntercept <- if (modelAsFormula) {
+    modelFormula == formula(~ 1)
+  } else {
+    ncol(modelMatrix) == 1 & all(modelMatrix == 1)
+  }
+  if (justIntercept & all(lambda <= 1e-6)) {
+      alpha <- alpha_hat
+      betaConv <- rep(TRUE, nrow(object))
+      betaIter <- rep(1,nrow(object))
+      betaMatrix <- if (useWeights) {
+                      matrix(log2(rowSums(weights*counts(object, normalized=TRUE))
+                                  /rowSums(weights)),ncol=1)
+                    } else {
+                      matrix(log2(rowMeans(counts(object, normalized=TRUE))),ncol=1)
+                    }
+      mu <- normalizationFactors * as.numeric(2^betaMatrix)
+      logLikeMat <- dnbinom(counts(object), mu=mu, size=1/alpha, log=TRUE)
+      logLike <- if (useWeights) {
+                   rowSums(weights*logLikeMat)
+                 } else {
+                   rowSums(logLikeMat)
+                 }
+      modelMatrix <- stats::model.matrix.default(~ 1, as.data.frame(colData(object)))
+      colnames(modelMatrix) <- modelMatrixNames <- "Intercept"
+      w <- if (useWeights) {
+             weights * (mu^-1 + alpha)^-1
+           } else {
+             (mu^-1 + alpha)^-1
+           }
+      xtwx <- rowSums(w)
+      sigma <- xtwx^-1
+      betaSE <- matrix(log2(exp(1)) * sqrt(sigma),ncol=1)      
+      hat_diagonals <- w * xtwx^-1
+      res <- list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
+                  betaSE = betaSE, mu = mu, betaIter = betaIter,
+                  modelMatrix=modelMatrix, 
+                  nterms=1, hat_diagonals=hat_diagonals)
+      return(res)
+  }
+  
+  qrx <- qr(modelMatrix)
+  # if full rank, estimate initial betas for IRLS below
+  if (qrx$rank == ncol(modelMatrix)) {
+    Q <- qr.Q(qrx)
+    R <- qr.R(qrx)
+    y <- t(log(counts(object,normalized=TRUE) + .1))
+    beta_mat <- t(solve(R, t(Q) %*% y))
+  } else {
+    if ("Intercept" %in% modelMatrixNames) {
+      beta_mat <- matrix(0, ncol=ncol(modelMatrix), nrow=nrow(object))
+      # use the natural log as fitBeta occurs in the natural log scale
+      logBaseMean <- log(rowMeans(counts(object,normalized=TRUE)))
+      beta_mat[,which(modelMatrixNames == "Intercept")] <- logBaseMean
+    } else {
+      beta_mat <- matrix(1, ncol=ncol(modelMatrix), nrow=nrow(object))
+    }
+  }
+  
+  # here we convert from the log2 scale of the betas
+  # and the beta prior variance to the log scale
+  # used in fitBeta.
+  # so we divide by the square of the
+  # conversion factor, log(2)
+  lambdaNatLogScale <- lambda / log(2)^2
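+  # (one-line check: if beta = log(2) * beta2 relates the natural-log and
+  # log2-scale coefficients, then lambda2 * beta2^2 = (lambda2/log(2)^2) * beta^2,
+  # matching the division above)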
+  
+  betaRes <- fitBetaWrapper(ySEXP = counts(object), xSEXP = modelMatrix,
+                            nfSEXP = normalizationFactors,
+                            alpha_hatSEXP = alpha_hat,
+                            beta_matSEXP = beta_mat,
+                            lambdaSEXP = lambdaNatLogScale,
+                            weightsSEXP = weights,
+                            useWeightsSEXP = useWeights,
+                            tolSEXP = betaTol, maxitSEXP = maxit,
+                            useQRSEXP=useQR)
+
+  # Note on deviance: the 'deviance' calculated in fitBeta() (C++)
+  # is not returned in mcols(object)$deviance. instead, we calculate
+  # the log likelihood below and use -2 * logLike.
+  # (reason is that we have other ways of estimating beta:
+  # above intercept code, and below optim code)
+  
+  mu <- normalizationFactors * t(exp(modelMatrix %*% t(betaRes$beta_mat)))
+  dispersionVector <- rep(dispersions(object), times=ncol(object))
+  logLike <- nbinomLogLike(counts(object), mu, dispersions(object), weights, useWeights)
+
+  # test for stability
+  rowStable <- apply(betaRes$beta_mat,1,function(row) sum(is.na(row))) == 0
+
+  # test for positive variances
+  rowVarPositive <- apply(betaRes$beta_var_mat,1,function(row) sum(row <= 0)) == 0
+  
+  # test for convergence, stability and positive variances
+  betaConv <- betaRes$iter < maxit
+  
+  # here we transform the betaMatrix and betaSE to a log2 scale
+  betaMatrix <- log2(exp(1))*betaRes$beta_mat
+  colnames(betaMatrix) <- modelMatrixNames
+  colnames(modelMatrix) <- modelMatrixNames
+  # warn below regarding these rows with negative variance
+  betaSE <- log2(exp(1))*sqrt(pmax(betaRes$beta_var_mat,0))
+  colnames(betaSE) <- paste0("SE_",modelMatrixNames)
+
+  # switch based on whether we should also use optim
+  # on rows which did not converge
+  rowsForOptim <- if (useOptim) {
+    which(!betaConv | !rowStable | !rowVarPositive)
+  } else {
+    which(!rowStable | !rowVarPositive)
+  }
+  
+  if (forceOptim) {
+    rowsForOptim <- seq_along(betaConv)
+  }
+  
+  if (length(rowsForOptim) > 0) {
+    # we use optim if didn't reach convergence with the IRLS code
+    resOptim <- fitNbinomGLMsOptim(object,modelMatrix,lambda,
+                                   rowsForOptim,rowStable,
+                                   normalizationFactors,alpha_hat,
+                                   weights,useWeights,
+                                   betaMatrix,betaSE,betaConv,
+                                   beta_mat,
+                                   mu,logLike)
+    betaMatrix <- resOptim$betaMatrix
+    betaSE <- resOptim$betaSE
+    betaConv <- resOptim$betaConv
+    mu <- resOptim$mu
+    logLike <- resOptim$logLike
+  }
+
+  stopifnot(!any(is.na(betaSE)))
+  nNonposVar <- sum(rowSums(betaSE == 0) > 0)
+  if (warnNonposVar & nNonposVar > 0) warning(nNonposVar, " rows had non-positive estimates of variance for coefficients")
+  
+  list(logLike = logLike, betaConv = betaConv, betaMatrix = betaMatrix,
+       betaSE = betaSE, mu = mu, betaIter = betaRes$iter, modelMatrix=modelMatrix, 
+       nterms=ncol(modelMatrix), hat_diagonals=betaRes$hat_diagonals)
+}
+
+# this function calls fitNbinomGLMs() twice:
+# 1 - without the beta prior, in order to calculate the
+#     beta prior variance and hat matrix
+# 2 - again but with the prior in order to get beta matrix and standard errors
+fitGLMsWithPrior <- function(object, betaTol, maxit, useOptim, useQR, betaPriorVar) {
+  
+  objectNZ <- object[!mcols(object)$allZero,,drop=FALSE]
+  modelMatrixType <- attr(object, "modelMatrixType")
+
+  if (missing(betaPriorVar) | !(all(c("mu","H") %in% assayNames(objectNZ)))) {
+    # first, fit the negative binomial GLM without a prior,
+    # used to construct the prior variances
+    # and for the hat matrix diagonals for calculating Cook's distance
+    fit <- fitNbinomGLMs(objectNZ,
+                         betaTol=betaTol, maxit=maxit,
+                         useOptim=useOptim, useQR=useQR,
+                         renameCols = (modelMatrixType == "standard"))
+    modelMatrix <- fit$modelMatrix
+    modelMatrixNames <- colnames(modelMatrix)
+    H <- fit$hat_diagonals
+    betaMatrix <- fit$betaMatrix
+    mu <- fit$mu
+
+    modelMatrixNames[modelMatrixNames == "(Intercept)"] <- "Intercept"
+    modelMatrixNames <- make.names(modelMatrixNames)
+    colnames(betaMatrix) <- modelMatrixNames
+    
+    # save the MLE log fold changes for addMLE argument of results
+    convertNames <- renameModelMatrixColumns(colData(object),
+                                             design(objectNZ))
+    convertNames <- convertNames[convertNames$from %in% modelMatrixNames,,drop=FALSE]
+    modelMatrixNames[match(convertNames$from, modelMatrixNames)] <- convertNames$to
+    mleBetaMatrix <- fit$betaMatrix
+    colnames(mleBetaMatrix) <- paste0("MLE_",modelMatrixNames)
+
+    # store for use in estimateBetaPriorVar below
+    mcols(objectNZ) <- cbind(mcols(objectNZ), DataFrame(mleBetaMatrix))
+  } else {
+    # we can skip the first MLE fit because the
+    # beta prior variance and hat matrix diagonals were provided
+    modelMatrix <- getModelMatrix(object)
+    H <- assays(objectNZ)[["H"]]
+    mu <- assays(objectNZ)[["mu"]]
+    mleBetaMatrix <- as.matrix(mcols(objectNZ)[,grep("MLE_",names(mcols(objectNZ))),drop=FALSE])
+  }
+     
+  if (missing(betaPriorVar)) {
+    betaPriorVar <- estimateBetaPriorVar(objectNZ)
+  } else {
+    # else we are provided the prior variance:
+    # check if the lambda is the correct length
+    # given the design formula
+    if (modelMatrixType == "expanded") {
+      modelMatrix <- makeExpandedModelMatrix(objectNZ)
+    }
+    p <- ncol(modelMatrix)
+    if (length(betaPriorVar) != p) {
+      stop(paste("betaPriorVar should have length",p,"to match:",paste(colnames(modelMatrix),collapse=", ")))
+    }
+  }
+  
+  # refit the negative binomial GLM with a prior on betas
+  if (any(betaPriorVar == 0)) {
+    stop("beta prior variances are equal to zero for some variables")
+  }
+  lambda <- 1/betaPriorVar
+
+  if (modelMatrixType == "standard") {
+    fit <- fitNbinomGLMs(objectNZ, lambda=lambda,
+                         betaTol=betaTol, maxit=maxit,
+                         useOptim=useOptim, useQR=useQR)
+    modelMatrix <- fit$modelMatrix
+  } else {
+    modelMatrix <- makeExpandedModelMatrix(objectNZ)
+    fit <- fitNbinomGLMs(objectNZ, lambda=lambda,
+                         betaTol=betaTol, maxit=maxit,
+                         useOptim=useOptim, useQR=useQR,
+                         modelMatrix=modelMatrix, renameCols=FALSE)
+  }
+
+  res <- list(fit=fit, H=H, betaPriorVar=betaPriorVar, mu=mu,
+              modelMatrix=modelMatrix, mleBetaMatrix=mleBetaMatrix)
+  res
+}
+
+# breaking out the optim backup code from fitNbinomGLMs
+fitNbinomGLMsOptim <- function(object,modelMatrix,lambda,
+                               rowsForOptim,rowStable,
+                               normalizationFactors,alpha_hat,
+                               weights,useWeights,
+                               betaMatrix,betaSE,betaConv,
+                               beta_mat,
+                               mu,logLike) {
+  x <- modelMatrix
+  lambdaNatLogScale <- lambda / log(2)^2
+  large <- 30
+  for (row in rowsForOptim) {
+    betaRow <- if (rowStable[row] & all(abs(betaMatrix[row,]) < large)) {
+      betaMatrix[row,]
+    } else {
+      beta_mat[row,]
+    }
+    nf <- normalizationFactors[row,]
+    k <- counts(object)[row,]
+    alpha <- alpha_hat[row]
+    objectiveFn <- function(p) {
+      mu_row <- as.numeric(nf * 2^(x %*% p))
+      logLikeVector <- dnbinom(k,mu=mu_row,size=1/alpha,log=TRUE)
+      logLike <- if (useWeights) {
+                   sum(weights[row,] * logLikeVector)
+                 } else {
+                   sum(logLikeVector)
+                 }
+      logPrior <- sum(dnorm(p,0,sqrt(1/lambda),log=TRUE))
+      negLogPost <- -1 * (logLike + logPrior)
+      if (is.finite(negLogPost)) negLogPost else 10^300
+    }
+    o <- optim(betaRow, objectiveFn, method="L-BFGS-B",lower=-large, upper=large)
+    ridge <- if (length(lambdaNatLogScale) > 1) {
+      diag(lambdaNatLogScale)
+    } else {
+      as.matrix(lambdaNatLogScale,ncol=1)
+    }
+    # if we converged, change betaConv to TRUE
+    if (o$convergence == 0) {
+      betaConv[row] <- TRUE
+    }
+    # with or without convergence, store the estimate from optim
+    betaMatrix[row,] <- o$par
+    # calculate the standard errors
+    mu_row <- as.numeric(nf * 2^(x %*% o$par))
+    # store the new mu vector
+    mu[row,] <- mu_row
+    minmu <- 0.5
+    mu_row[mu_row < minmu] <- minmu
+    w <- if (useWeights) {
+           diag(weights[row,] * (mu_row^-1 + alpha)^-1)
+         } else {
+           diag((mu_row^-1 + alpha)^-1)
+         }
+    xtwx <- t(x) %*% w %*% x
+    xtwxRidgeInv <- solve(xtwx + ridge)
+    sigma <- xtwxRidgeInv %*% xtwx %*% xtwxRidgeInv
+    # warn below regarding these rows with negative variance
+    betaSE[row,] <- log2(exp(1)) * sqrt(pmax(diag(sigma),0))
+    logLikeVector <- dnbinom(k,mu=mu_row,size=1/alpha,log=TRUE)
+    logLike[row] <- if (useWeights) {
+                      sum(weights[row,] * logLikeVector)
+                    } else {
+                      sum(logLikeVector)
+                    }
+  }
+  return(list(betaMatrix=betaMatrix,betaSE=betaSE,
+              betaConv=betaConv,mu=mu,logLike=logLike))
+}
diff --git a/R/helper.R b/R/helper.R
index a0434a5..6eeb73e 100644
--- a/R/helper.R
+++ b/R/helper.R
@@ -1,3 +1,152 @@
+#' Shrink log2 fold changes
+#'
+#' This function adds shrunken log2 fold changes (LFC) to a
+#' results table which was run without LFC moderation.
+#' Note: this function is still being prototyped.
+#'
+#' @param dds a DESeqDataSet object, which has been run through
+#' \code{\link{DESeq}}, or at the least, \code{\link{estimateDispersions}}
+#' @param coef the number of the coefficient (LFC) to shrink,
+#' consult \code{resultsNames(dds)} after running \code{DESeq(dds, betaPrior=FALSE)}.
+#' only \code{coef} or \code{contrast} can be specified, not both
+#' @param contrast see argument description in \code{\link{results}}.
+#' only \code{coef} or \code{contrast} can be specified, not both
+#' @param res a DESeqResults object (can be missing)
+#' @param type currently ignored, as only one shrinkage estimator
+#' ("normal") is implemented; more options are planned
+#'
+#' @return if \code{res} is not missing, a DESeqResults object with
+#' the \code{log2FoldChange} column replaced with a shrunken LFC.
+#' If \code{res} is missing, just the shrunken LFC vector.
+#'
+#' @export
+#' 
+#' @examples
+#'
+#'  dds <- makeExampleDESeqDataSet(betaSD=1)
+#'  dds <- DESeq(dds, betaPrior=FALSE)
+#'  res <- results(dds)
+#'  res.shr <- lfcShrink(dds=dds, coef=2, res=res)
+#'  res.shr <- lfcShrink(dds=dds, contrast=c("condition","B","A"), res=res)
+#' 
+lfcShrink <- function(dds, coef, contrast, res, type="normal") {
+  if (is.null(dispersions(dds))) {
+    stop("lfcShrink requires dispersion estimates, first call estimateDispersions()")
+  }
+
+  # match the shrinkage type
+  type <- match.arg(type, choices=c("normal"))
+
+  # fit MLE coefficients... TODO skip this step
+  dds <- estimateMLEForBetaPriorVar(dds)
+
+  stopifnot(missing(coef) | missing(contrast))
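+  # a coefficient refers to a column of the standard model matrix, while
+  # a contrast between factor levels uses the expanded model matrix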
+  if (missing(contrast)) {
+    modelMatrixType <- "standard"
+  } else {
+    modelMatrixType <- "expanded"
+  }
+  attr(dds,"modelMatrixType") <- modelMatrixType
+  betaPriorVar <- estimateBetaPriorVar(dds)
+
+  dds.shr <- nbinomWaldTest(dds,
+                            betaPrior=TRUE,
+                            betaPriorVar=betaPriorVar,
+                            modelMatrixType=modelMatrixType,
+                            quiet=TRUE)
+
+  if (missing(contrast)) {
+    rn <- resultsNames(dds.shr)
+    res.shr <- results(dds.shr, name=rn[coef])
+  } else {
+    res.shr <- results(dds.shr, contrast=contrast)
+  }
+  
+  if (!missing(res)) {
+    res <- res[,c("baseMean","log2FoldChange","stat","pvalue","padj")]
+    res$log2FoldChange <- res.shr$log2FoldChange
+    mcols(res)$description[2] <- mcols(res.shr)$description[2]
+    return(res)
+  } else {
+    return(res.shr$log2FoldChange)
+  }
+}
+
+#' Unmix samples using loss in a variance stabilized space
+#'
+#' Unmixes samples in \code{x} according to \code{pure} components,
+#' using numerical optimization. The components in \code{pure}
+#' are added on the scale of gene expression (either normalized counts, or TPMs).
+#' The loss function when comparing fitted expression to the
+#' samples in \code{x} occurs in a variance stabilized space.
+#' This task is sometimes referred to as "deconvolution",
+#' and can be used, for example, to identify contributions from
+#' various tissues.
+#' Note: if the \code{pbapply} package is installed a progress bar
+#' will be displayed while mixing components are fit.
+#'
+#' @param x normalized counts or TPMs of the samples to be unmixed
+#' @param pure normalized counts or TPMs of the "pure" samples
+#' @param alpha for normalized counts, the dispersion of the data
+#' when a negative binomial model is fit. this can be found by examining
+#' the asymptotic value of \code{dispersionFunction(dds)}, when using
+#' \code{fitType="parametric"} or the mean value when using
+#' \code{fitType="mean"}.
+#' @param shift for TPMs, the shift which approximately stabilizes the variance
+#' of log shifted TPMs. Can be assessed with \code{vsn::meanSdPlot}.
+#' @param loss either 1 (for L1) or 2 (for squared) loss function.
+#' Default is 1.
+#' @param quiet suppress progress bar. default is FALSE, show progress bar
+#' if pbapply is installed.
+#'
+#' @return mixture components for each sample (rows), which sum to 1.
+#'
+#' @export
+unmix <- function(x, pure, alpha, shift, loss=1, quiet=FALSE) {
+
+  # exactly one of 'alpha' or 'shift' must be provided
+  if (missing(alpha) == missing(shift)) {
+    stop("provide exactly one of 'alpha' or 'shift'")
+  }
+  stopifnot(loss %in% 1:2)
+  stopifnot(nrow(x) == nrow(pure))
+  stopifnot(ncol(pure) > 1)
+  
+  if (requireNamespace("pbapply", quietly=TRUE) & !quiet) {
+    lapply <- pbapply::pblapply
+  }
+  
+  if (missing(shift)) {
+    stopifnot(alpha > 0)
+    # variance stabilizing transformation for NB w/ fixed dispersion alpha
+    vst <- function(q, alpha) ( 2 * asinh(sqrt(alpha * q)) - log(alpha) - log(4) ) / log(2)
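+    # for large q, 2*asinh(sqrt(alpha*q)) ~ log(4*alpha*q), so vst(q)
+    # approaches log2(q): a log2-like scale with the variance stabilized
+    # at low counts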
+    distVST <- function(p, i, vst, alpha, loss) {
+      sum(abs(vst(x[,i], alpha) - vst(pure %*% p, alpha))^loss)
+    }
+    res <- lapply(seq_len(ncol(x)), function(i) {
+      optim(par=rep(1, ncol(pure)), fn=distVST, gr=NULL, i, vst, alpha, loss,
+            method="L-BFGS-B", lower=0, upper=100)$par
+    })
+  } else {
+    stopifnot(shift > 0)
+    # VST of shifted log
+    vstSL <- function(q, shift) log(q + shift)
+    distSL <- function(p, i, vst, shift, loss) {
+      sum(abs(vstSL(x[,i], shift) - vstSL(pure %*% p, shift))^loss)
+    }
+    res <- lapply(seq_len(ncol(x)), function(i) {
+      optim(par=rep(1, ncol(pure)), fn=distSL, gr=NULL, i, vstSL, shift, loss,
+            method="L-BFGS-B", lower=0, upper=100)$par
+    })
+  }
+
+  mix <- do.call(rbind, res)
+  mix <- mix / rowSums(mix)
+  colnames(mix) <- colnames(pure)
+  
+  return(mix)
+  
+}
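+
+# A hedged usage sketch (object names are hypothetical): given a matrix
+# 'mixCounts' of normalized counts for the mixed samples, a matrix
+# 'pureCounts' for the pure components, and an illustrative asymptotic
+# dispersion of 0.05:
+#
+#   mix <- unmix(x=mixCounts, pure=pureCounts, alpha=0.05)
+#   head(mix)  # per-sample mixture proportions, rows summing to 1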
+
 #' Collapse technical replicates in a RangedSummarizedExperiment or DESeqDataSet
 #'
 #' Collapses the columns in \code{object} by summing within levels
@@ -340,14 +489,7 @@ DESeqParallel <- function(object, test, fitType, betaPrior, full, reduced,
 
     # if so:
 
-    # need to set standard model matrix for LRT with beta prior
-    if (test == "LRT") {
-      attr(object, "modelMatrixType") <- "standard"
-      attr(objectNZ, "modelMatrixType") <- "standard"
-      modelMatrixType <- "standard"
-    }
-
-    # also if explicitly set
+    # if explicitly set
     if (!is.null(modelMatrixType) && modelMatrixType == "standard") {
       attr(object, "modelMatrixType") <- "standard"
       attr(objectNZ, "modelMatrixType") <- "standard"
@@ -371,18 +513,12 @@ DESeqParallel <- function(object, test, fitType, betaPrior, full, reduced,
 
     # the third parallel execution: the final GLM and statistics
     if (!quiet) message(paste("fitting model and testing:",nworkers,"workers"))
-    if (test == "Wald") {
-
-      objectNZ <- do.call(rbind, bplapply(levels(idx), function(l) {
-        nbinomWaldTest(objectNZ[idx == l,,drop=FALSE], betaPriorVar=betaPriorVar,
-                       quiet=TRUE, modelMatrixType=modelMatrixType)
-      }, BPPARAM=BPPARAM))
-    } else if (test == "LRT") {
-      objectNZ <- do.call(rbind, bplapply(levels(idx), function(l) {
-        nbinomLRT(objectNZ[idx == l,,drop=FALSE], full=full, reduced=reduced,
-                  betaPrior=betaPrior, betaPriorVar=betaPriorVar, quiet=TRUE)
-      }, BPPARAM=BPPARAM))
-    }
+    objectNZ <- do.call(rbind, bplapply(levels(idx), function(l) {
+      nbinomWaldTest(objectNZ[idx == l,,drop=FALSE],
+                     betaPrior=TRUE,
+                     betaPriorVar=betaPriorVar,
+                     quiet=TRUE, modelMatrixType=modelMatrixType)
+    }, BPPARAM=BPPARAM))
     
   } else {
     
@@ -396,7 +532,7 @@ DESeqParallel <- function(object, test, fitType, betaPrior, full, reduced,
                                               dispPriorVar=dispPriorVar, quiet=TRUE, modelMatrix=modelMatrix)
         # replace design
         if (noReps) design(objectNZSub) <- designIn
-        nbinomWaldTest(objectNZSub, betaPrior=betaPrior,
+        nbinomWaldTest(objectNZSub, betaPrior=FALSE,
                        quiet=TRUE, modelMatrix=modelMatrix, modelMatrixType="standard")
       }, BPPARAM=BPPARAM))
     } else if (test == "LRT") {
diff --git a/R/methods.R b/R/methods.R
index 33cd238..6d3f1a5 100644
--- a/R/methods.R
+++ b/R/methods.R
@@ -369,18 +369,26 @@ setReplaceMethod("normalizationFactors", signature(object="DESeqDataSet", value=
                    object
                  })
 
-estimateSizeFactors.DESeqDataSet <- function(object, type=c("ratio","iterate"),
-                                             locfunc=stats::median, geoMeans, controlGenes, normMatrix) {
-  type <- match.arg(type, c("ratio","iterate"))
+estimateSizeFactors.DESeqDataSet <- function(object, type=c("ratio","poscounts","iterate"),
+                                             locfunc=stats::median,
+                                             geoMeans, controlGenes, normMatrix) {
+  type <- match.arg(type, c("ratio","poscounts","iterate"))
   # Temporary hack for backward compatibility with "old" DESeqDataSet
   # objects. Remove once all serialized DESeqDataSet objects around have
   # been updated.
-  if (!.hasSlot(object, "rowRanges"))
+  if (!.hasSlot(object, "rowRanges")) {
     object <- updateObject(object)
+  }
   object <- sanitizeColData(object)
   if (type == "iterate") {
     sizeFactors(object) <- estimateSizeFactorsIterate(object)
   } else {
+    if (type == "poscounts") {
+      geoMeanNZ <- function(x) {
+        if (all(x == 0)) { 0 } else { exp( sum(log(x[x > 0])) / length(x) ) }
+      }
+      geoMeans <- apply(counts(object), 1, geoMeanNZ)
+    }
     if ("avgTxLength" %in% assayNames(object)) {
       nm <- assays(object)[["avgTxLength"]]
       nm <- nm / exp(rowMeans(log(nm))) # divide out the geometric mean
@@ -434,12 +442,17 @@ estimateSizeFactors.DESeqDataSet <- function(object, type=c("ratio","iterate"),
 #' @aliases estimateSizeFactors estimateSizeFactors,DESeqDataSet-method
 #' 
 #' @param object a DESeqDataSet
-#' @param type either "ratio" or "iterate". "ratio" uses the standard
-#' median ratio method introduced in DESeq. The size factor is the
-#' median ratio of the sample over a pseudosample: for each gene, the geometric mean
-#' of all samples. "iterate" offers an alternative estimator, which can be
-#' used even when all genes contain a sample with a zero. This estimator
-#' iterates between estimating the dispersion with a design of ~1, and
+#' @param type Method for estimation: either "ratio", "poscounts", or "iterate".
+#' "ratio" uses the standard median ratio method introduced in DESeq. The size factor is the
+#' median ratio of the sample over a "pseudosample": for each gene, the geometric mean
+#' of all samples.
+#' "poscounts" and "iterate" offer alternative estimators, which can be
+#' used even when all genes contain a sample with a zero (a problem for the
+#' default method, as the geometric mean becomes zero, and the ratio undefined).
+#' The "poscounts" estimator deals with a gene with some zeros, by calculating a
+#' modified geometric mean by taking the n-th root of the product of the non-zero counts.
+#' This evolved out of use cases with Paul McMurdie's phyloseq package for metagenomic samples.
+#' The "iterate" estimator iterates between estimating the dispersion with a design of ~1, and
 #' finding a size factor vector by numerically optimizing the likelihood
 #' of the ~1 model.
 #' @param locfunc a function to compute a location for a sample. By default, the
@@ -740,10 +753,11 @@ coef.DESeqDataSet  <- function(object, SE=FALSE, ...) {
 #' \method{summary}{DESeqResults}(object, alpha, \dots)
 #' 
 #' @param object a \code{\link{DESeqResults}} object
-#' @param alpha the adjusted p-value cutoff. if not set, this
+#' @param alpha the adjusted p-value cutoff. If not set, this
 #' defaults to the \code{alpha} argument which was used in
 #' \code{\link{results}} to set the target FDR for independent
-#' filtering.
+#' filtering, or if independent filtering was not performed,
+#' to 0.1.
 #' @param ... additional arguments
 #'
 #' @docType methods
diff --git a/R/plots.R b/R/plots.R
index fbf7b52..cf4e66e 100644
--- a/R/plots.R
+++ b/R/plots.R
@@ -1,15 +1,24 @@
-plotDispEsts.DESeqDataSet <- function( object, ymin,
+plotDispEsts.DESeqDataSet <- function( object, ymin, CV=FALSE,
   genecol = "black", fitcol = "red", finalcol = "dodgerblue",
   legend=TRUE, xlab, ylab, log = "xy", cex = 0.45, ... )
 {
   if (missing(xlab)) xlab <- "mean of normalized counts"
-  if (missing(ylab)) ylab <- "dispersion"
+  if (missing(ylab)) {
+    if (CV) {
+      ylab <- "coefficient of variation"
+    } else {
+      ylab <- "dispersion"
+    }
+  }
   
   px = mcols(object)$baseMean
   sel = (px>0)
   px = px[sel]
 
-  py = mcols(object)$dispGeneEst[sel]
+  # transformation of dispersion into CV or not
+  f <- if (CV) sqrt else I
+  
+  py = f(mcols(object)$dispGeneEst[sel])
   if(missing(ymin))
       ymin = 10^floor(log10(min(py[py>0], na.rm=TRUE))-0.1)
 
@@ -21,12 +30,12 @@ plotDispEsts.DESeqDataSet <- function( object, ymin,
   cexOutlier <- ifelse(mcols(object)$dispOutlier[sel],2*cex,cex)
   lwdOutlier <- ifelse(mcols(object)$dispOutlier[sel],2,1)
   if (!is.null(dispersions(object))) {
-    points(px, dispersions(object)[sel], col=finalcol, cex=cexOutlier,
+    points(px, f(dispersions(object)[sel]), col=finalcol, cex=cexOutlier,
            pch=pchOutlier, lwd=lwdOutlier)
   }
 
   if (!is.null(mcols(object)$dispFit)) {
-    points(px, mcols(object)$dispFit[sel], col=fitcol, cex=cex, pch=16)
+    points(px, f(mcols(object)$dispFit[sel]), col=fitcol, cex=cex, pch=16)
   }
   
   if (legend) {
@@ -48,6 +57,11 @@ plotDispEsts.DESeqDataSet <- function( object, ymin,
 #' @param object a DESeqDataSet, with dispersions estimated
 #' @param ymin the lower bound for points on the plot, points beyond this
 #'    are drawn as triangles at ymin
+#' @param CV logical, whether to plot the asymptotic or biological
+#' coefficient of variation (the square root of dispersion) on the y-axis.
+#' As the mean grows to infinity, the square root of dispersion gives
+#' the coefficient of variation for the counts. Default is \code{FALSE},
+#' plotting dispersion.
 #' @param genecol the color for gene-wise dispersion estimates
 #' @param fitcol the color of the fitted estimates
 #' @param finalcol the color of the final estimates used for testing
@@ -127,10 +141,12 @@ plotMA.DESeqResults <- function(object, alpha, main="", xlab="mean of normalized
 #' or a \code{DESeqDataSet} processed by \code{\link{DESeq}}, or the
 #' individual functions \code{\link{nbinomWaldTest}} or \code{\link{nbinomLRT}}
 #' @param alpha the significance level for thresholding adjusted p-values
-#' @param MLE whether to plot the MLE (unshrunken estimates), defaults to FALSE.
+#' @param MLE if \code{betaPrior=TRUE} was used,
+#' whether to plot the MLE (unshrunken estimates), defaults to FALSE.
 #' Requires that \code{\link{results}} was run with \code{addMLE=TRUE}.
-#' Note that the MLE will be plotted regardless of this argument, if DESeq() was run
-#' with \code{betaPrior=FALSE}.
+#' Note that the MLE will be plotted regardless of this argument,
+#' if DESeq() was run with \code{betaPrior=FALSE}. See \code{\link{lfcShrink}}
+#' for examples on how to plot shrunken log2 fold changes.
 #' @param main optional title for the plot
 #' @param xlab optional defaults to "mean of normalized counts"
 #' @param ylim optional y limits
@@ -249,22 +265,23 @@ plotPCA.DESeqTransform = function(object, intgroup="condition", ntop=500, return
 #' @export
 setMethod("plotPCA", signature(object="DESeqTransform"), plotPCA.DESeqTransform)
 
-#' Plot of normalized counts for a single gene on log scale
+#' Plot of normalized counts for a single gene
 #'
-#' Note: normalized counts plus a pseudocount of 0.5 are shown.
+#' Normalized counts plus a pseudocount of 0.5 are shown by default.
 #' 
 #' @param dds a \code{DESeqDataSet}
 #' @param gene a character, specifying the name of the gene to plot
 #' @param intgroup interesting groups: a character vector of names in \code{colData(x)} to use for grouping
 #' @param normalized whether the counts should be normalized by size factor
 #' (default is TRUE)
-#' @param transform whether to present log2 counts (TRUE) or
-#' to present the counts on the log scale (FALSE, default)
+#' @param transform whether to use a log scale for the y-axis
+#' (default is TRUE)
 #' @param main as in 'plot'
 #' @param xlab as in 'plot'
 #' @param returnData should the function only return the data.frame of counts and
 #' covariates for custom plotting (default is FALSE)
 #' @param replaced use the outlier-replaced counts if they exist
+#' @param pc pseudocount added to the counts before plotting:
+#' defaults to 0.5 when \code{transform=TRUE}, otherwise 0
 #' @param ... arguments passed to plot
 #' 
 #' @examples
@@ -274,13 +291,19 @@ setMethod("plotPCA", signature(object="DESeqTransform"), plotPCA.DESeqTransform)
 #' 
 #' @export
 plotCounts <- function(dds, gene, intgroup="condition",
-                       normalized=TRUE, transform=FALSE,
+                       normalized=TRUE, transform=TRUE,
                        main, xlab="group",
                        returnData=FALSE,
-                       replaced=FALSE, ...) {
+                       replaced=FALSE,
+                       pc, ...) {
   stopifnot(length(gene) == 1 & (is.character(gene) | (is.numeric(gene) & (gene >= 1 & gene <= nrow(dds)))))
   if (!all(intgroup %in% names(colData(dds)))) stop("all variables in 'intgroup' must be columns of colData")
   stopifnot(returnData | all(sapply(intgroup, function(v) is(colData(dds)[[v]], "factor"))))
+
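+  # default pseudocount: 0.5 when the y-axis is log scale (so zero counts
+  # can still be drawn), otherwise 0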
+  if (missing(pc)) {
+    pc <- if (transform) 0.5 else 0
+  }
+  
   if (is.null(sizeFactors(dds)) & is.null(normalizationFactors(dds))) {
     dds <- estimateSizeFactors(dds)
   }
@@ -297,15 +320,8 @@ plotCounts <- function(dds, gene, intgroup="condition",
     factor(apply( as.data.frame(colData(dds)[, intgroup, drop=FALSE]),
                  1, paste, collapse=" : "))
   }
-  data <- data.frame(count=cnts + .5, group=as.integer(group))
-  if (transform) {
-    data$count <- log2(data$count)
-    ylab <- expression(log[2]~count)
-    logxy <- ""
-  } else {
-    ylab <- ifelse(normalized,"normalized count","count")
-    logxy <- "y"
-  }
+  data <- data.frame(count=cnts + pc, group=as.integer(group))
+  logxy <- if (transform) "y" else "" 
   if (missing(main)) {
     main <- if (is.numeric(gene)) {
       rownames(dds)[gene]
@@ -313,6 +329,7 @@ plotCounts <- function(dds, gene, intgroup="condition",
       gene
     }
   }
+  ylab <- ifelse(normalized,"normalized count","count")
   if (returnData) return(data.frame(count=data$count, colData(dds)[intgroup]))
   plot(data$group + runif(ncol(dds),-.05,.05), data$count, xlim=c(.5,max(data$group)+.5),
        log=logxy, xaxt="n", xlab=xlab, ylab=ylab, main=main, ...)
diff --git a/R/results.R b/R/results.R
index eea7c25..b427f97 100644
--- a/R/results.R
+++ b/R/results.R
@@ -166,10 +166,13 @@
 #' @param test this is automatically detected internally if not provided.
 #' the one exception is after \code{nbinomLRT} has been run, \code{test="Wald"}
 #' will generate Wald statistics and Wald test p-values.
-#' @param addMLE whether the "unshrunken" maximum likelihood estimates (MLE)
+#' @param addMLE if \code{betaPrior=TRUE} was used,
+#' whether the "unshrunken" maximum likelihood estimates (MLE)
 #' of log2 fold change should be added as a column to the results table (default is FALSE).
-#' only applicable when a beta prior was used during the model fitting. only implemented
-#' for 'contrast' for three element character vectors or 'name' for interactions.
+#' This argument is preserved for backward compatibility, as the
+#' recommended pipeline is now to generate shrunken MAP estimates using \code{\link{lfcShrink}}.
+#' This functionality is only implemented for \code{contrast}
+#' specified as a three element character vector.
 #' @param tidy whether to output the results table with rownames as a first column 'row'.
 #' the table will also be coerced to \code{data.frame}
 #' @param parallel if FALSE, no parallelization. if TRUE, parallel
@@ -415,8 +418,6 @@ of length 3 to 'contrast' instead of using 'name'")
     if (is.list(contrast)) stop("addMLE only implemented for: contrast=c('condition','B','A')")
     res <- cbind(res, mleContrast(object, contrast))
     res <- res[,c("baseMean","log2FoldChange","lfcMLE","lfcSE","stat","pvalue")]
-    # if an all zero contrast, also zero out the lfcMLE
-    res$lfcMLE[ which(res$log2FoldChange == 0 & res$stat == 0) ] <- 0
   }
   
   # only if we need to generate new p-values
@@ -665,12 +666,25 @@ getContrast <- function(object, contrast, useT=FALSE, df) {
     counts(objectNZ)
   }
 
+  # use weights if they are present in assays(object)
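+  # (same normalization as in getAndCheckWeights: each row is scaled so
+  # its largest weight is 1)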
+  if ("weights" %in% assayNames(object)) {
+    useWeights <- TRUE
+    weights <- assays(object)[["weights"]]
+    stopifnot(all(weights >= 0))
+    weights <- weights / apply(weights, 1, max)
+  } else {
+    useWeights <- FALSE
+    weights <- matrix(1, nrow=nrow(object), ncol=ncol(object))
+  }
+  
   betaRes <- fitBeta(ySEXP = countsMatrix, xSEXP = modelMatrix,
                      nfSEXP = normalizationFactors,
                      alpha_hatSEXP = alpha_hat,
                      contrastSEXP = contrast,
                      beta_matSEXP = beta_mat,
                      lambdaSEXP = lambda,
+                     weightsSEXP = weights,
+                     useWeightsSEXP = useWeights,
                      tolSEXP = 1e-8, maxitSEXP = 0,
                      useQRSEXP=FALSE) # QR not relevant, fitting loop isn't entered
   # convert back to log2 scale
@@ -816,11 +830,6 @@ cleanContrast <- function(object, contrast, expanded=FALSE, listValues, test) {
                    "are expected to be in resultsNames(object)"))
       }
     }
-
-    # check if both levels have all zero counts
-    # (this has to be down here to make use of error checking above)
-    contrastAllZero <- contrastAllZeroCharacter(object, contrastFactor,
-                         contrastNumLevel, contrastDenomLevel)
     
   }
 
@@ -865,8 +874,6 @@ cleanContrast <- function(object, contrast, expanded=FALSE, listValues, test) {
       contrastName <- paste(contrastFactor,contrastNumLevel,"vs",contrastDenomLevel)
     }
 
-    contrastAllZero <- contrastAllZeroNumeric(object, contrast)
-    
     # now get the contrast
     contrastResults <- getContrast(object, contrast, useT=FALSE, df)
     lfcType <- if (attr(object,"betaPrior")) "MAP" else "MLE"
@@ -881,15 +888,6 @@ cleanContrast <- function(object, contrast, expanded=FALSE, listValues, test) {
                  contrastResults)
     
   }
-
-  # if the counts in all samples included in contrast are zero
-  # then zero out the LFC, Wald stat and p-value set to 1
-  contrastAllZero <- contrastAllZero & !mcols(object)$allZero
-  if (sum(contrastAllZero) > 0) {
-    res$log2FoldChange[contrastAllZero] <- 0
-    res$stat[contrastAllZero] <- 0
-    res$pvalue[contrastAllZero] <- 1
-  }
   
   # if test is "LRT", overwrite the statistic and p-value
   # (we only ran contrast for the coefficient)
@@ -1063,35 +1061,3 @@ or the denominator (second element of contrast list), but not both")
   return(contrast)
 }
 
-
-contrastAllZeroCharacter <- function(object, contrastFactor, contrastNumLevel, contrastDenomLevel) {
-  cts <- counts(object)
-  f <- colData(object)[[contrastFactor]]
-  cts.sub <- cts[ , f %in% c(contrastNumLevel, contrastDenomLevel), drop=FALSE ]
-  rowSums( cts.sub == 0 ) == ncol(cts.sub)
-}
-
-contrastAllZeroNumeric <- function(object, contrast) {
-  if (is.null(attr(object,"modelMatrix"))) {
-    stop("was expecting a model matrix stored as an attribute of the DESeqDataSet")
-  }
-  modelMatrix <- attr(object, "modelMatrix")
-
-  # note: this extra leg-work to zero out LFC, lfcSE, and set p-value to 1
-  # for contrasts comparing groups where both groups have all zeros
-  # is only implemented for the case in which we can identify
-  # the relevant samples by multiplying the model matrix
-  # with a vector where the non-zero elements of the numeric contrast are replaced with 1
-
-  # so this code will not zero out in the case of standard model matrices
-  # where the user supplies a numeric vector that pulls out a single column
-  # of the model matrix, for example.
-  
-  if (all(contrast >= 0) | all(contrast <= 0)) {
-    return( rep(FALSE, nrow(object)) )
-  }
-  contrastBinary <- ifelse(contrast == 0, 0, 1)
-  whichSamples <- ifelse(modelMatrix %*% contrastBinary == 0, 0, 1)
-  zeroTest <- counts(object) %*% whichSamples
-  zeroTest == 0
-}
diff --git a/R/wrappers.R b/R/wrappers.R
new file mode 100644
index 0000000..0bd3e10
--- /dev/null
+++ b/R/wrappers.R
@@ -0,0 +1,108 @@
+# Fit dispersions for negative binomial GLM
+#
+# This function estimates the dispersion parameter (alpha) for negative binomial
+# generalized linear models. The fitting is performed on the log scale.
+#
+# ySEXP n by m matrix of counts
+# xSEXP m by k design matrix
+# mu_hatSEXP n by m matrix, the expected mean values, given beta-hat
+# log_alphaSEXP n length vector of initial guesses for log(alpha)
+# log_alpha_prior_meanSEXP n length vector of the fitted values for log(alpha)
+# log_alpha_prior_sigmasqSEXP a single numeric value for the variance of the prior
+# min_log_alphaSEXP the minimum value of log alpha
+# kappa_0SEXP a parameter used in calculating the initial proposal
+#   for the backtracking search
+#   initial proposal = log(alpha) + kappa_0 * deriv. of log lik. w.r.t. log(alpha)
+# tolSEXP tolerance for convergence in estimates
+# maxitSEXP maximum number of iterations
+# usePriorSEXP boolean variable, whether to use a prior or just calculate the MLE
+# weightsSEXP n by m matrix of weights
+# useWeightsSEXP whether to use weights
+#
+# return a list with elements: log_alpha, iter, iter_accept, last_change, initial_lp, initial_dlp, last_lp, last_dlp, last_d2lp
+fitDispWrapper <- function (ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP,
+                            log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP,
+                            tolSEXP, maxitSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP) {
+  # test for any NAs in arguments
+  arg.names <- names(formals(fitDispWrapper))
+  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
+  if (any(na.test)) stop(paste("in call to fitDisp, the following arguments contain NA:",
+                               paste(arg.names[na.test],collapse=", ")))
+  fitDisp(ySEXP=ySEXP, xSEXP=xSEXP, mu_hatSEXP=mu_hatSEXP,
+          log_alphaSEXP=log_alphaSEXP, log_alpha_prior_meanSEXP=log_alpha_prior_meanSEXP,
+          log_alpha_prior_sigmasqSEXP=log_alpha_prior_sigmasqSEXP,
+          min_log_alphaSEXP=min_log_alphaSEXP, kappa_0SEXP=kappa_0SEXP,
+          tolSEXP=tolSEXP, maxitSEXP=maxitSEXP, usePriorSEXP=usePriorSEXP,
+          weightsSEXP=weightsSEXP, useWeightsSEXP=useWeightsSEXP)
+}
+
+# Fit dispersions by evaluating over grid
+#
+# This function estimates the dispersion parameter (alpha) for negative binomial
+# generalized linear models. The fitting is performed on the log scale.
+#
+# ySEXP n by m matrix of counts
+# xSEXP m by k design matrix
+# mu_hatSEXP n by m matrix, the expected mean values, given beta-hat
+# disp_gridSEXP the grid over which to estimate
+# log_alpha_prior_meanSEXP n length vector of the fitted values for log(alpha)
+# log_alpha_prior_sigmasqSEXP a single numeric value for the variance of the prior
+# usePriorSEXP boolean variable, whether to use a prior or just calculate the MLE
+# weightsSEXP n by m matrix of weights
+# useWeightsSEXP whether to use weights
+#
+# return: a vector of dispersion estimates (alpha), one per row of y
+fitDispGridWrapper <- function(y, x, mu, logAlphaPriorMean, logAlphaPriorSigmaSq, usePrior,
+                               weightsSEXP, useWeightsSEXP) {
+  # test for any NAs in arguments
+  arg.names <- names(formals(fitDispGridWrapper))
+  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
+  if (any(na.test)) stop(paste("in call to fitDispGridWrapper, the following arguments contain NA:",
+                               paste(arg.names[na.test],collapse=", ")))
+  minLogAlpha <- log(1e-8)
+  maxLogAlpha <- log(max(10, ncol(y)))
+  dispGrid <- seq(from=minLogAlpha, to=maxLogAlpha, length=15)
+  logAlpha <- fitDispGrid(ySEXP=y, xSEXP=x, mu_hatSEXP=mu, disp_gridSEXP=dispGrid,
+                          log_alpha_prior_meanSEXP=logAlphaPriorMean,
+                          log_alpha_prior_sigmasqSEXP=logAlphaPriorSigmaSq,
+                          usePriorSEXP=usePrior,
+                          weightsSEXP=weightsSEXP, useWeightsSEXP=useWeightsSEXP)$log_alpha
+  exp(logAlpha)
+}
+
+# Fit beta coefficients for negative binomial GLM
+#
+# This function estimates the coefficients (betas) for negative binomial generalized linear models.
+#
+# ySEXP n by m matrix of counts
+# xSEXP m by k design matrix
+# nfSEXP n by m matrix of normalization factors
+# alpha_hatSEXP n length vector of the dispersion estimates
+# contrastSEXP a k length vector for a possible contrast
+# beta_matSEXP n by k matrix of the initial estimates for the betas
+# lambdaSEXP k length vector of the ridge values
+# weightsSEXP n by m matrix of weights
+# useWeightsSEXP whether to use weights
+# tolSEXP tolerance for convergence in estimates
+# maxitSEXP maximum number of iterations
+# useQRSEXP whether to use QR decomposition
+#
+# Note: at this level the betas are on the natural log scale
+fitBetaWrapper <- function (ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP,
+                            beta_matSEXP, lambdaSEXP, weightsSEXP, useWeightsSEXP,
+                            tolSEXP, maxitSEXP, useQRSEXP) {
+  if ( missing(contrastSEXP) ) {
+    # contrast is not required, just give 1,0,0,...
+    contrastSEXP <- c(1,rep(0,ncol(xSEXP)-1))
+  }
+  # test for any NAs in arguments
+  arg.names <- names(formals(fitBetaWrapper))
+  na.test <- sapply(mget(arg.names), function(x) any(is.na(x)))
+  if (any(na.test)) stop(paste("in call to fitBeta, the following arguments contain NA:",
+                               paste(arg.names[na.test],collapse=", ")))
+  
+  fitBeta(ySEXP=ySEXP, xSEXP=xSEXP, nfSEXP=nfSEXP, alpha_hatSEXP=alpha_hatSEXP,
+          contrastSEXP=contrastSEXP, beta_matSEXP=beta_matSEXP,
+          lambdaSEXP=lambdaSEXP, weightsSEXP=weightsSEXP, useWeightsSEXP=useWeightsSEXP,
+          tolSEXP=tolSEXP, maxitSEXP=maxitSEXP, useQRSEXP=useQRSEXP)
+}
diff --git a/build/vignette.rds b/build/vignette.rds
index 13dfd64..ac13429 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/inst/CITATION b/inst/CITATION
index 856126d..d33064d 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -1,6 +1,6 @@
 citEntry(entry="article",
          title = "Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2",
-         author = personList( as.person("Michael I Love"),
+         author = personList( as.person("Michael I. Love"),
                               as.person("Wolfgang Huber"),
                               as.person("Simon Anders")),
          year = 2014,
@@ -10,6 +10,6 @@ citEntry(entry="article",
          issue = 12,
          pages = 550,
          textVersion = 
-         paste("Michael I Love, Wolfgang Huber and Simon Anders (2014):", 
-               "Moderated estimation of fold change and dispersion for RNA-Seq data with DESeq2.",
-                "Genome Biology" ) )
+         paste("Love, M.I., Huber, W., Anders, S.", 
+               "Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2",
+                "Genome Biology 15(12):550 (2014)" ) )
diff --git a/inst/doc/DESeq2.R b/inst/doc/DESeq2.R
index 8b0ef78..a859340 100644
--- a/inst/doc/DESeq2.R
+++ b/inst/doc/DESeq2.R
@@ -1,162 +1,165 @@
-## ----style, eval=TRUE, echo=FALSE, results="asis"----------------
-BiocStyle::latex2()
-
-## ----knitr, echo=FALSE, results="hide"---------------------------
-library("knitr")
-opts_chunk$set(
-  tidy=FALSE,
-  dev="png",
-  fig.show="hide",
-  fig.width=4, fig.height=4.5,
-  fig.pos="tbh",
-  cache=TRUE,
-  message=FALSE)
-
-## ----loadDESeq2, echo=FALSE--------------------------------------
-library("DESeq2")
+## ----setup, echo=FALSE, results="hide"-----------------------------------
+knitr::opts_chunk$set(tidy=FALSE, cache=TRUE,
+                      dev="png",
+                      message=FALSE, error=FALSE, warning=TRUE)
+
+## ----quickStart, eval=FALSE----------------------------------------------
+#  dds <- DESeqDataSetFromMatrix(countData = cts,
+#                                colData = coldata,
+#                                design= ~ batch + condition)
+#  dds <- DESeq(dds)
+#  res <- results(dds, contrast=c("condition","treated","control"))
 
-## ----options, results="hide", echo=FALSE-------------------------
-options(digits=3, prompt=" ", continue=" ")
+## ----txiSetup------------------------------------------------------------
+library("tximport")
+library("readr")
+library("tximportData")
+dir <- system.file("extdata", package="tximportData")
+samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
+samples$condition <- factor(rep(c("A","B"),each=3))
+rownames(samples) <- samples$run
+samples[,c("pop","center","run","condition")]
 
-## ----quick, eval=FALSE-------------------------------------------
-#  dds <- DESeqDataSet(se, design = ~ batch + condition)
-#  dds <- DESeq(dds)
-#  res <- results(dds, contrast=c("condition","trt","con"))
+## ----txiFiles------------------------------------------------------------
+files <- file.path(dir,"salmon", samples$run, "quant.sf")
+names(files) <- samples$run
+tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
 
-## ----loadSumExp--------------------------------------------------
-library("airway")
-data("airway")
-se <- airway
+## ----tximport, results="hide"--------------------------------------------
+txi <- tximport(files, type="salmon", tx2gene=tx2gene)
 
-## ----sumExpInput-------------------------------------------------
+## ----txi2dds, results="hide"---------------------------------------------
 library("DESeq2")
-ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
-ddsSE
+ddsTxi <- DESeqDataSetFromTximport(txi,
+                                   colData = samples,
+                                   design = ~ condition)
 
-## ----loadPasilla-------------------------------------------------
+## ----loadPasilla---------------------------------------------------------
 library("pasilla")
-pasCts <- system.file("extdata", "pasilla_gene_counts.tsv",
-                 package="pasilla", mustWork=TRUE)
-pasAnno <- system.file("extdata", "pasilla_sample_annotation.csv",
+pasCts <- system.file("extdata",
+                      "pasilla_gene_counts.tsv",
+                      package="pasilla", mustWork=TRUE)
+pasAnno <- system.file("extdata",
+                       "pasilla_sample_annotation.csv",
                        package="pasilla", mustWork=TRUE)
-countData <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
-colData <- read.csv(pasAnno, row.names=1)
-colData <- colData[,c("condition","type")]
-
-## ----showPasilla-------------------------------------------------
-head(countData)
-head(colData)
-
-## ----reorderPasila-----------------------------------------------
-rownames(colData) <- sub("fb","",rownames(colData))
-all(rownames(colData) %in% colnames(countData))
-countData <- countData[, rownames(colData)]
-all(rownames(colData) == colnames(countData))
-
-## ----matrixInput-------------------------------------------------
-dds <- DESeqDataSetFromMatrix(countData = countData,
-                              colData = colData,
-                              design = ~ condition)
-dds
+cts <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
+coldata <- read.csv(pasAnno, row.names=1)
+coldata <- coldata[,c("condition","type")]
 
-## ----addFeatureData----------------------------------------------
-featureData <- data.frame(gene=rownames(countData))
-(mcols(dds) <- DataFrame(mcols(dds), featureData))
+## ----showPasilla---------------------------------------------------------
+head(cts)
+head(coldata)
 
-## ----tximport----------------------------------------------------
-library("tximport")
-library("readr")
-library("tximportData")
-dir <- system.file("extdata", package="tximportData")
-samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
-files <- file.path(dir,"salmon", samples$run, "quant.sf")
-names(files) <- paste0("sample",1:6)
-tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
-txi <- tximport(files, type="salmon", tx2gene=tx2gene, reader=read_tsv)
+## ----reorderPasilla------------------------------------------------------
+rownames(coldata) <- sub("fb","",rownames(coldata))
+all(rownames(coldata) %in% colnames(cts))
+cts <- cts[, rownames(coldata)]
+all(rownames(coldata) == colnames(cts))
+
+## ----matrixInput---------------------------------------------------------
+library("DESeq2")
+dds <- DESeqDataSetFromMatrix(countData = cts,
+                              colData = coldata,
+                              design = ~ condition)
+dds
 
-## ----txi2dds-----------------------------------------------------
-coldata <- data.frame(condition=factor(rep(c("A","B"),each=3)))
-rownames(coldata) <- colnames(txi$counts)
-ddsTxi <- DESeqDataSetFromTximport(txi, colData=coldata,
-                                   design=~ condition)
+## ----addFeatureData------------------------------------------------------
+featureData <- data.frame(gene=rownames(cts))
+mcols(dds) <- DataFrame(mcols(dds), featureData)
+mcols(dds)
 
-## ----htseqDirI, eval=FALSE---------------------------------------
+## ----htseqDirI, eval=FALSE-----------------------------------------------
 #  directory <- "/path/to/your/files/"
 
-## ----htseqDirII--------------------------------------------------
-directory <- system.file("extdata", package="pasilla", mustWork=TRUE)
+## ----htseqDirII----------------------------------------------------------
+directory <- system.file("extdata", package="pasilla",
+                         mustWork=TRUE)
 
-## ----htseqInput--------------------------------------------------
+## ----htseqInput----------------------------------------------------------
 sampleFiles <- grep("treated",list.files(directory),value=TRUE)
 sampleCondition <- sub("(.*treated).*","\\1",sampleFiles)
 sampleTable <- data.frame(sampleName = sampleFiles,
                           fileName = sampleFiles,
                           condition = sampleCondition)
+
+## ----htseqDds------------------------------------------------------------
+library("DESeq2")
 ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                        directory = directory,
                                        design= ~ condition)
 ddsHTSeq
 
-## ----prefilter---------------------------------------------------
+## ----loadSumExp----------------------------------------------------------
+library("airway")
+data("airway")
+se <- airway
+
+## ----sumExpInput---------------------------------------------------------
+library("DESeq2")
+ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
+ddsSE
+
+## ----prefilter-----------------------------------------------------------
 dds <- dds[ rowSums(counts(dds)) > 1, ]
 
-## ----factorlvl---------------------------------------------------
+## ----factorlvl-----------------------------------------------------------
 dds$condition <- factor(dds$condition, levels=c("untreated","treated"))
 
-## ----relevel-----------------------------------------------------
+## ----relevel-------------------------------------------------------------
 dds$condition <- relevel(dds$condition, ref="untreated")
 
-## ----droplevels--------------------------------------------------
+## ----droplevels----------------------------------------------------------
 dds$condition <- droplevels(dds$condition)
 
-## ----deseq-------------------------------------------------------
+## ----deseq---------------------------------------------------------------
 dds <- DESeq(dds)
 res <- results(dds)
 res
 
-## ----parallel, eval=FALSE----------------------------------------
+## ----lfcShrink-----------------------------------------------------------
+resultsNames(dds)
+resLFC <- lfcShrink(dds, coef=2, res=res)
+resLFC
+
+## ----parallel, eval=FALSE------------------------------------------------
 #  library("BiocParallel")
 #  register(MulticoreParam(4))
 
-## ----resOrder----------------------------------------------------
+## ----resOrder------------------------------------------------------------
 resOrdered <- res[order(res$padj),]
 
-## ----sumRes------------------------------------------------------
+## ----sumRes--------------------------------------------------------------
 summary(res)
 
-## ----sumRes01----------------------------------------------------
+## ----sumRes01------------------------------------------------------------
 sum(res$padj < 0.1, na.rm=TRUE)
 
-## ----resAlpha05--------------------------------------------------
+## ----resAlpha05----------------------------------------------------------
 res05 <- results(dds, alpha=0.05)
 summary(res05)
 sum(res05$padj < 0.05, na.rm=TRUE)
 
-## ----IHW---------------------------------------------------------
+## ----IHW-----------------------------------------------------------------
 library("IHW")
 resIHW <- results(dds, filterFun=ihw)
 summary(resIHW)
 sum(resIHW$padj < 0.1, na.rm=TRUE)
 metadata(resIHW)$ihwResult
 
-## ----MA, fig.width=4.5, fig.height=4.5---------------------------
-plotMA(res, main="DESeq2", ylim=c(-2,2))
+## ----MA------------------------------------------------------------------
+plotMA(res, ylim=c(-2,2))
 
-## ----MAidentify, eval=FALSE--------------------------------------
+## ----shrunkMA------------------------------------------------------------
+plotMA(resLFC, ylim=c(-2,2))
+
+## ----MAidentify, eval=FALSE----------------------------------------------
 #  idx <- identify(res$baseMean, res$log2FoldChange)
 #  rownames(res)[idx]
 
-## ----resMLE------------------------------------------------------
-resMLE <- results(dds, addMLE=TRUE)
-head(resMLE, 4)
-
-## ----MANoPrior, fig.width=4.5, fig.height=4.5--------------------
-plotMA(resMLE, MLE=TRUE, main="unshrunken LFC", ylim=c(-2,2))
-
-## ----plotCounts, dev="pdf", fig.width=4.5, fig.height=5----------
+## ----plotCounts----------------------------------------------------------
 plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
 
-## ----plotCountsAdv, dev="pdf", fig.width=3.5, fig.height=3.5-----
+## ----plotCountsAdv-------------------------------------------------------
 d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition", 
                 returnData=TRUE)
 library("ggplot2")
@@ -164,86 +167,67 @@ ggplot(d, aes(x=condition, y=count)) +
   geom_point(position=position_jitter(w=0.1,h=0)) + 
   scale_y_log10(breaks=c(25,100,400))
 
-## ----metadata----------------------------------------------------
+## ----metadata------------------------------------------------------------
 mcols(res)$description
 
-## ----export, eval=FALSE------------------------------------------
+## ----export, eval=FALSE--------------------------------------------------
 #  write.csv(as.data.frame(resOrdered),
 #            file="condition_treated_results.csv")
 
-## ----subset------------------------------------------------------
+## ----subset--------------------------------------------------------------
 resSig <- subset(resOrdered, padj < 0.1)
 resSig
 
-## ----multifactor-------------------------------------------------
+## ----multifactor---------------------------------------------------------
 colData(dds)
 
-## ----copyMultifactor---------------------------------------------
+## ----copyMultifactor-----------------------------------------------------
 ddsMF <- dds
 
-## ----replaceDesign-----------------------------------------------
+## ----replaceDesign-------------------------------------------------------
 design(ddsMF) <- formula(~ type + condition)
 ddsMF <- DESeq(ddsMF)
 
-## ----multiResults------------------------------------------------
+## ----multiResults--------------------------------------------------------
 resMF <- results(ddsMF)
 head(resMF)
 
-## ----multiTypeResults--------------------------------------------
+## ----multiTypeResults----------------------------------------------------
 resMFType <- results(ddsMF,
                      contrast=c("type", "single-read", "paired-end"))
 head(resMFType)
 
-## ----rlogAndVST--------------------------------------------------
+## ----rlogAndVST----------------------------------------------------------
 rld <- rlog(dds, blind=FALSE)
 vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
 vsd.fast <- vst(dds, blind=FALSE)
 head(assay(rld), 3)
 
-## ----vsd1, echo=FALSE, fig.width=4.5, fig.height=4.5, fig.show="asis", fig.small=TRUE, fig.pos="!bt", fig.cap="VST and log2. Graphs of the variance stabilizing transformation for sample 1, in blue, and of the transformation $f(n) = \\log_2(n/s_1)$, in black. $n$ are the counts and $s_1$ is the size factor for the first sample.\\label{figure/vsd1-1}"----
-px     <- counts(dds)[,1] / sizeFactors(dds)[1]
-ord    <- order(px)
-ord    <- ord[px[ord] < 150]
-ord    <- ord[seq(1, length(ord), length=50)]
-last   <- ord[length(ord)]
-vstcol <- c("blue", "black")
-matplot(px[ord],
-        cbind(assay(vsd)[, 1], log2(px))[ord, ],
-        type="l", lty=1, col=vstcol, xlab="n", ylab="f(n)")
-legend("bottomright",
-       legend = c(
-        expression("variance stabilizing transformation"),
-        expression(log[2](n/s[1]))),
-       fill=vstcol)
-
-## ----meansd, fig.width=4, fig.height=3, fig.show="asis", fig.wide=TRUE, fig.pos="tb", out.width=".32\\linewidth", fig.cap="Per-gene standard deviation (taken across samples), against the rank of the mean. {\\bfhelvet(a)} for the shifted logarithm $\\log_2(n+1)$, the regularized log transformation {\\bfhelvet(b)} and the variance stabilizing transformation {\\bfhelvet(c)}.\\label{fig:meansd}", fig.subcap=""----
+## ----meansd--------------------------------------------------------------
+# this gives log2(n + 1)
+ntd <- normTransform(dds)
 library("vsn")
 notAllZero <- (rowSums(counts(dds))>0)
-meanSdPlot(log2(counts(dds,normalized=TRUE)[notAllZero,] + 1))
+meanSdPlot(assay(ntd)[notAllZero,])
 meanSdPlot(assay(rld[notAllZero,]))
 meanSdPlot(assay(vsd[notAllZero,]))
 
-## ----heatmap, dev="pdf", fig.width=5, fig.height=7---------------
+## ----heatmap-------------------------------------------------------------
 library("pheatmap")
 select <- order(rowMeans(counts(dds,normalized=TRUE)),
                 decreasing=TRUE)[1:20]
-
-nt <- normTransform(dds) # defaults to log2(x+1)
-log2.norm.counts <- assay(nt)[select,]
 df <- as.data.frame(colData(dds)[,c("condition","type")])
-pheatmap(log2.norm.counts, cluster_rows=FALSE, show_rownames=FALSE,
+pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
          cluster_cols=FALSE, annotation_col=df)
-
 pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
          cluster_cols=FALSE, annotation_col=df)
-
 pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
          cluster_cols=FALSE, annotation_col=df)
 
-## ----sampleClust-------------------------------------------------
+## ----sampleClust---------------------------------------------------------
 sampleDists <- dist(t(assay(rld)))
 
-## ----figHeatmapSamples, dev="pdf", fig.width=7, fig.height=7, fig.show="asis", fig.small=TRUE, fig.pos="tb", fig.cap="Sample-to-sample distances.  Heatmap showing the Euclidean distances between the samples as calculated from the regularized log transformation.\\label{figure/figHeatmapSamples-1}"----
+## ----figHeatmapSamples---------------------------------------------------
 library("RColorBrewer")
 sampleDistMatrix <- as.matrix(sampleDists)
 rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
@@ -254,34 +238,34 @@ pheatmap(sampleDistMatrix,
          clustering_distance_cols=sampleDists,
          col=colors)
 
-## ----figPCA, dev="pdf", fig.width=5, fig.height=3----------------
+## ----figPCA--------------------------------------------------------------
 plotPCA(rld, intgroup=c("condition", "type"))
 
-## ----figPCA2, dev="pdf", fig.width=5, fig.height=3---------------
-data <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
-percentVar <- round(100 * attr(data, "percentVar"))
-ggplot(data, aes(PC1, PC2, color=condition, shape=type)) +
+## ----figPCA2-------------------------------------------------------------
+pcaData <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
+percentVar <- round(100 * attr(pcaData, "percentVar"))
+ggplot(pcaData, aes(PC1, PC2, color=condition, shape=type)) +
   geom_point(size=3) +
   xlab(paste0("PC1: ",percentVar[1],"% variance")) +
   ylab(paste0("PC2: ",percentVar[2],"% variance")) + 
   coord_fixed()
 
-## ----WaldTest, eval=FALSE----------------------------------------
+## ----WaldTest, eval=FALSE------------------------------------------------
 #  dds <- estimateSizeFactors(dds)
 #  dds <- estimateDispersions(dds)
 #  dds <- nbinomWaldTest(dds)
 
-## ----simpleContrast, eval=FALSE----------------------------------
+## ----simpleContrast, eval=FALSE------------------------------------------
 #  results(dds, contrast=c("condition","C","B"))
 
-## ----combineFactors, eval=FALSE----------------------------------
+## ----combineFactors, eval=FALSE------------------------------------------
 #  dds$group <- factor(paste0(dds$genotype, dds$condition))
 #  design(dds) <- ~ group
 #  dds <- DESeq(dds)
 #  resultsNames(dds)
 #  results(dds, contrast=c("group", "IB", "IA"))
 
-## ----interFig, dev="pdf", fig.width=4, fig.height=3, echo=FALSE, results="hide"----
+## ----interFig, echo=FALSE, results="hide"--------------------------------
 npg <- 20
 mu <- 2^c(8,10,9,11,10,12)
 cond <- rep(rep(c("A","B"),each=npg),3)
@@ -300,7 +284,7 @@ plotit <- function(d, title) {
 plotit(d, "Gene 1") + ylim(7,13)
 lm(log2c ~ cond + geno + geno:cond, data=d)
 
-## ----interFig2, dev="pdf", fig.width=4, fig.height=3,  echo=FALSE, results="hide"----
+## ----interFig2, echo=FALSE, results="hide"-------------------------------
 mu[4] <- 2^12
 mu[6] <- 2^8
 counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
@@ -308,22 +292,22 @@ d2 <- data.frame(log2c=log2(counts + 1), cond, geno)
 plotit(d2, "Gene 2") + ylim(7,13)
 lm(log2c ~ cond + geno + geno:cond, data=d2)
 
-## ----simpleLRT, eval=FALSE---------------------------------------
+## ----simpleLRT, eval=FALSE-----------------------------------------------
 #  dds <- DESeq(dds, test="LRT", reduced=~1)
 #  res <- results(dds)
 
-## ----simpleLRT2, eval=FALSE--------------------------------------
+## ----simpleLRT2, eval=FALSE----------------------------------------------
 #  dds <- DESeq(dds, test="LRT", reduced=~batch)
 #  res <- results(dds)
 
-## ----boxplotCooks, fig.show="asis", fig.small=TRUE, fig.cap="Boxplot of Cook's distances.  Here we can look to see if one sample has much higher Cook's distances than the other samples. In this case, the samples all have comparable range of Cook's distances.\\label{figure/boxplotCooks-1}"----
+## ----boxplotCooks--------------------------------------------------------
 par(mar=c(8,5,2,2))
 boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
 
-## ----dispFit, fig.show="asis", fig.small=TRUE, fig.cap="Dispersion plot.  The dispersion estimate plot shows the gene-wise estimates (black), the fitted values (red), and the final maximum \\textit{a posteriori} estimates used in testing (blue).\\label{figure/dispFit-1}"----
+## ----dispFit-------------------------------------------------------------
 plotDispEsts(dds)
 
-## ----dispFitCustom-----------------------------------------------
+## ----dispFitCustom-------------------------------------------------------
 ddsCustom <- dds
 useForMedian <- mcols(ddsCustom)$dispGeneEst > 1e-7
 medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
@@ -331,7 +315,7 @@ medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
 dispersionFunction(ddsCustom) <- function(mu) medianDisp
 ddsCustom <- estimateDispersionsMAP(ddsCustom)
 
-## ----filtByMean, dev="pdf", fig.show="asis", fig.small=TRUE, fig.cap="Independent filtering.  The \\Rfunction{results} function maximizes the number of rejections (adjusted $p$ value less than a significance level), over the quantiles of a filter statistic (the mean of normalized counts). The threshold chosen (vertical line) is the lowest quantile of the filter for which the number of rejections is within 1 residual standard deviation to the peak of a curve fit to the number of rejecti [...]
+## ----filtByMean----------------------------------------------------------
 metadata(res)$alpha
 metadata(res)$filterThreshold
 plot(metadata(res)$filterNumRej, 
@@ -340,23 +324,21 @@ plot(metadata(res)$filterNumRej,
 lines(metadata(res)$lo.fit, col="red")
 abline(v=metadata(res)$filterTheta)
 
-## ----noFilt------------------------------------------------------
+## ----noFilt--------------------------------------------------------------
 resNoFilt <- results(dds, independentFiltering=FALSE)
 addmargins(table(filtering=(res$padj < .1),
                  noFiltering=(resNoFilt$padj < .1)))
 
-## ----ddsNoPrior--------------------------------------------------
+## ----ddsNoPrior----------------------------------------------------------
 ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
 
-## ----lfcThresh, fig.show="asis", fig.cap='MA-plots of tests of log2 fold change with respect to a threshold value.  Going left to right across rows, the tests are for \\Robject{altHypothesis = "greaterAbs"}, \\Robject{"lessAbs"}, \\Robject{"greater"}, and \\Robject{"less"}.\\label{figure/lfcThresh-1}'----
+## ----lfcThresh-----------------------------------------------------------
 par(mfrow=c(2,2),mar=c(2,2,1,1))
 yl <- c(-2.5,2.5)
-
 resGA <- results(dds, lfcThreshold=.5, altHypothesis="greaterAbs")
 resLA <- results(ddsNoPrior, lfcThreshold=.5, altHypothesis="lessAbs")
 resG <- results(dds, lfcThreshold=.5, altHypothesis="greater")
 resL <- results(dds, lfcThreshold=.5, altHypothesis="less")
-
 plotMA(resGA, ylim=yl)
 abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
 plotMA(resLA, ylim=yl)
@@ -366,85 +348,91 @@ abline(h=.5,col="dodgerblue",lwd=2)
 plotMA(resL, ylim=yl)
 abline(h=-.5,col="dodgerblue",lwd=2)
 
-## ----mcols-------------------------------------------------------
+## ----mcols---------------------------------------------------------------
 mcols(dds,use.names=TRUE)[1:4,1:4]
-# here using substr() only for display purposes
 substr(names(mcols(dds)),1,10) 
 mcols(mcols(dds), use.names=TRUE)[1:4,]
 
-## ----muAndCooks--------------------------------------------------
+## ----muAndCooks----------------------------------------------------------
 head(assays(dds)[["mu"]])
 head(assays(dds)[["cooks"]])
 
-## ----dispersions-------------------------------------------------
+## ----dispersions---------------------------------------------------------
 head(dispersions(dds))
-# which is the same as 
 head(mcols(dds)$dispersion)
 
-## ----sizefactors-------------------------------------------------
+## ----sizefactors---------------------------------------------------------
 sizeFactors(dds)
 
-## ----coef--------------------------------------------------------
+## ----coef----------------------------------------------------------------
 head(coef(dds))
 
-## ----betaPriorVar------------------------------------------------
+## ----betaPriorVar--------------------------------------------------------
 attr(dds, "betaPriorVar")
 
-## ----dispPriorVar------------------------------------------------
+## ----dispPriorVar--------------------------------------------------------
 dispersionFunction(dds)
 attr(dispersionFunction(dds), "dispPriorVar")
 
-## ----versionNum--------------------------------------------------
+## ----versionNum----------------------------------------------------------
 metadata(dds)[["version"]]
 
-## ----normFactors, eval=FALSE-------------------------------------
+## ----normFactors, eval=FALSE---------------------------------------------
 #  normFactors <- normFactors / exp(rowMeans(log(normFactors)))
 #  normalizationFactors(dds) <- normFactors
 
-## ----offsetTransform, eval=FALSE---------------------------------
+## ----offsetTransform, eval=FALSE-----------------------------------------
 #  cqnOffset <- cqnObject$glm.offset
 #  cqnNormFactors <- exp(cqnOffset)
 #  EDASeqNormFactors <- exp(-1 * EDASeqOffset)
 
-## ----lineardep, echo=FALSE---------------------------------------
-data.frame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
-
-## ----lineardep2, echo=FALSE--------------------------------------
-data.frame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
+## ----lineardep, echo=FALSE-----------------------------------------------
+DataFrame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
 
-## ----lineardep3, echo=FALSE--------------------------------------
-data.frame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
+## ----lineardep2, echo=FALSE----------------------------------------------
+DataFrame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
 
-## ----groupeffect-------------------------------------------------
-(coldata <- data.frame(grp=factor(rep(c("X","Y"),each=4)),
-                       ind=factor(rep(1:4,each=2)),
-                       cnd=factor(rep(c("A","B"),4))))
+## ----lineardep3, echo=FALSE----------------------------------------------
+DataFrame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
 
-## ----groupeffect2------------------------------------------------
-coldata$ind.n <- factor(rep(rep(1:2,each=2),2))
+## ----groupeffect---------------------------------------------------------
+coldata <- DataFrame(grp=factor(rep(c("X","Y"),each=6)),
+                     ind=factor(rep(1:6,each=2)),
+                     cnd=factor(rep(c("A","B"),6)))
 coldata
 
-## ----groupeffect3------------------------------------------------
+## ------------------------------------------------------------------------
+as.data.frame(coldata)
+
+## ----groupeffect2--------------------------------------------------------
+coldata$ind.n <- factor(rep(rep(1:3,each=2),2))
+as.data.frame(coldata)
+
+## ----groupeffect3--------------------------------------------------------
 model.matrix(~ grp + grp:ind.n + grp:cnd, coldata)
 
-## ----groupeffect4, eval=FALSE------------------------------------
+## ----groupeffect4, eval=FALSE--------------------------------------------
 #  results(dds, contrast=list("grpY.cndB","grpX.cndB"))
 
-## ----missingcombo------------------------------------------------
+## ----missingcombo--------------------------------------------------------
 group <- factor(rep(1:3,each=6))
 condition <- factor(rep(rep(c("A","B","C"),each=2),3))
-(d <- data.frame(group, condition)[-c(17,18),])
+d <- DataFrame(group, condition)[-c(17,18),]
+as.data.frame(d)
 
-## ----missingcombo2-----------------------------------------------
+## ----missingcombo2-------------------------------------------------------
 m1 <- model.matrix(~ condition*group, d)
 colnames(m1)
 unname(m1)
+all.zero <- apply(m1, 2, function(x) all(x==0))
+all.zero
 
-## ----missingcombo3-----------------------------------------------
-m1 <- m1[,-9]
+## ----missingcombo3-------------------------------------------------------
+idx <- which(all.zero)
+m1 <- m1[,-idx]
 unname(m1)
 
-## ----cooksPlot, fig.show="asis", fig.small=TRUE, fig.cap="Cook's distance.  Plot of the maximum Cook's distance per gene over the rank of the Wald statistics for the condition. The two regions with small Cook's distances are genes with a single count in one sample. The horizontal line is the default cutoff used for 7 samples and 3 estimated parameters.\\label{figure/cooksPlot-1}"----
+## ----cooksPlot-----------------------------------------------------------
 W <- res$stat
 maxCooks <- apply(assays(dds)[["cooks"]],1,max)
 idx <- !is.na(W)
@@ -455,31 +443,31 @@ m <- ncol(dds)
 p <- 3
 abline(h=qf(.99, p, m - p))
 
-## ----indFilt, fig.show="asis", fig.small=TRUE, fig.cap="Mean counts as a filter statistic.  The mean of normalized counts provides an independent statistic for filtering the tests. It is independent because the information about the variables in the design formula is not used. By filtering out genes which fall on the left side of the plot, the majority of the low $p$ values are kept.\\label{figure/indFilt-1}"----
+## ----indFilt-------------------------------------------------------------
 plot(res$baseMean+1, -log10(res$pvalue),
      log="x", xlab="mean of normalized counts",
      ylab=expression(-log[10](pvalue)),
      ylim=c(0,30),
      cex=.4, col=rgb(0,0,0,.3))
 
-## ----histindepfilt, dev="pdf", fig.width=7, fig.height=5---------
+## ----histindepfilt-------------------------------------------------------
 use <- res$baseMean > metadata(res)$filterThreshold
 h1 <- hist(res$pvalue[!use], breaks=0:50/50, plot=FALSE)
 h2 <- hist(res$pvalue[use], breaks=0:50/50, plot=FALSE)
 colori <- c(`do not pass`="khaki", `pass`="powderblue")
 
-## ----fighistindepfilt, fig.show="asis", fig.small=TRUE, fig.cap="Histogram of p values for all tests.  The area shaded in blue indicates the subset of those that pass the filtering, the area in khaki those that do not pass.\\label{figure/fighistindepfilt-1}"----
+## ----fighistindepfilt----------------------------------------------------
 barplot(height = rbind(h1$counts, h2$counts), beside = FALSE,
         col = colori, space = 0, main = "", ylab="frequency")
 text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)),
      adj = c(0.5,1.7), xpd=NA)
 legend("topright", fill=rev(colori), legend=rev(names(colori)))
 
-## ----vanillaDESeq, eval=FALSE------------------------------------
+## ----vanillaDESeq, eval=FALSE--------------------------------------------
 #  dds <- DESeq(dds, minReplicatesForReplace=Inf)
 #  res <- results(dds, cooksCutoff=FALSE, independentFiltering=FALSE)
 
-## ----varGroup, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Extreme range of within-group variability.  Typically, it is recommended to run \\Rfunction{DESeq} across samples from all groups, for datasets with multiple groups. However, this simulated dataset shows a case where it would be preferable to compare groups A and B by creating a smaller dataset without the C samples. Group C has much higher within-group variability, which would inflate the p [...]
+## ----varGroup, echo=FALSE------------------------------------------------
 set.seed(3)
 dds1 <- makeExampleDESeqDataSet(n=1000,m=12,betaSD=.3,dispMeanRel=function(x) 0.01)
 dds2 <- makeExampleDESeqDataSet(n=1000,m=12,
@@ -494,20 +482,9 @@ dds <- cbind(dds1, dds2)
 rld <- rlog(dds, blind=FALSE, fitType="mean")
 plotPCA(rld)
 
-## ----overShrink, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Example of a dataset with where the log fold change prior should be turned off.  Here we show a simulated MA-plot, where nearly all of the log fold changes are falling near the x-axis, with three genes that have very large log fold changes (note the y-axis is from -10 to 10 on the log2 scale). This would indicate a dataset where the log fold change prior would ``overshrink'' the large fold [...]
-plot(c(10^rnorm(1000, 3, 2),300,2000,5000), 
-     c(rnorm(1000, 0, .15), -5.5, -8.5, 7.5),
-     ylim=c(-10,10), log="x", cex=.4,
-     xlab="mean of normalized counts", 
-     ylab="log2 fold change")
-abline(h=0, col=rgb(1,0,0,.7))
-
-## ----convertNA, eval=FALSE---------------------------------------
+## ----convertNA, eval=FALSE-----------------------------------------------
 #  res$padj <- ifelse(is.na(res$padj), 1, res$padj)
 
-## ----sessInfo, results="asis", echo=FALSE------------------------
-toLatex(sessionInfo())
-
-## ----resetOptions, results="hide", echo=FALSE--------------------
-options(prompt="> ", continue="+ ")
+## ----sessionInfo---------------------------------------------------------
+sessionInfo()
 
diff --git a/inst/doc/DESeq2.Rmd b/inst/doc/DESeq2.Rmd
new file mode 100644
index 0000000..2969fc5
--- /dev/null
+++ b/inst/doc/DESeq2.Rmd
@@ -0,0 +1,2420 @@
+---
+title: "Analyzing RNA-seq data with DESeq2"
+author: "Michael I. Love, Simon Anders, and Wolfgang Huber"
+date: "`r BiocStyle::doc_date()`"
+package: "`r BiocStyle::pkg_ver('DESeq2')`"
+abstract: >
+  A basic task in the analysis of count data from RNA-seq is the
+  detection of differentially expressed genes. The count data are
+  presented as a table which reports, for each sample, the number of
+  sequence fragments that have been assigned to each gene. Analogous
+  data also arise for other assay types, including comparative ChIP-Seq,
+  HiC, shRNA screening, and mass spectrometry.  An important analysis
+  question is the quantification and statistical inference of systematic
+  changes between conditions, as compared to within-condition
+  variability. The package DESeq2 provides methods to test for
+  differential expression by use of negative binomial generalized linear
+  models; the estimates of dispersion and logarithmic fold changes
+  incorporate data-driven prior distributions. This vignette explains the
+  use of the package and demonstrates typical workflows.
+  [An RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+  on the Bioconductor website covers similar material to this vignette
+  but at a slower pace, including the generation of count matrices from
+  FASTQ files.
+  DESeq2 package version: `r packageVersion("DESeq2")`
+output:
+  rmarkdown::html_document:
+    highlight: pygments
+    toc: true
+    fig_width: 5
+bibliography: library.bib
+vignette: >
+  %\VignetteIndexEntry{Analyzing RNA-seq data with DESeq2}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding[utf8]{inputenc}
+---
+
+
+<!-- This is the source document -->
+
+
+```{r setup, echo=FALSE, results="hide"}
+knitr::opts_chunk$set(tidy=FALSE, cache=TRUE,
+                      dev="png",
+                      message=FALSE, error=FALSE, warning=TRUE)
+```
+
+# Standard workflow
+
+**If you use DESeq2 in published research, please cite:**
+
+> Love, M.I., Huber, W., Anders, S.,
+> Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2, 
+> *Genome Biology* 2014, **15**:550.
+> [10.1186/s13059-014-0550-8](http://dx.doi.org/10.1186/s13059-014-0550-8)
+
+Other Bioconductor packages with similar aims are
+[edgeR](http://bioconductor.org/packages/edgeR),
+[limma](http://bioconductor.org/packages/limma),
+[DSS](http://bioconductor.org/packages/DSS),
+[EBSeq](http://bioconductor.org/packages/EBSeq), and 
+[baySeq](http://bioconductor.org/packages/baySeq).
+
+## Quick start
+
+Here we show the most basic steps for a differential expression
+analysis. There are a variety of steps upstream of DESeq2 that result
+in the generation of counts or estimated counts for each sample, which
+we will discuss in the sections below. This code chunk assumes that
+you have a count matrix called `cts` and a table of sample
+information called `coldata`.  The `design` indicates how to model the
+samples, here, that we want to measure the effect of the condition,
+controlling for batch differences. The two factor variables `batch`
+and `condition` should be columns of `coldata`.
+
+```{r quickStart, eval=FALSE}
+dds <- DESeqDataSetFromMatrix(countData = cts,
+                              colData = coldata,
+                              design= ~ batch + condition)
+dds <- DESeq(dds)
+res <- results(dds, contrast=c("condition","treated","control"))
+```
+
+The following starting functions will be explained below:
+
+* If you have transcript quantification files, as produced by
+  *Salmon*, *Sailfish*, or *kallisto*, you would use
+  *DESeqDataSetFromTximport*.
+* If you have *htseq-count* files, the first line would use
+  *DESeqDataSetFromHTSeqCount*.
+* If you have a *RangedSummarizedExperiment*, the first line would use 
+  *DESeqDataSet*.
+
+## How to get help for DESeq2
+
+Any and all DESeq2 questions should be posted to the 
+**Bioconductor support site**, which serves as a searchable knowledge
+base of questions and answers:
+
+<https://support.bioconductor.org>
+
+Posting a question and tagging with "DESeq2" will automatically send
+an alert to the package authors to respond on the support site.  See
+the first question in the list of [Frequently Asked Questions](#FAQ)
+(FAQ) for information about how to construct an informative post. 
+
+You should **not** email your question to the package authors, as we will
+just reply that the question should be posted to the 
+**Bioconductor support site**.
+
+## Input data
+
+### Why un-normalized counts?
+
+As input, the DESeq2 package expects count data as obtained, e.g.,
+from RNA-seq or another high-throughput sequencing experiment, in the form of a
+matrix of integer values. The value in the *i*-th row and the *j*-th column of
+the matrix tells how many reads can be assigned to gene *i* in sample *j*.
+Analogously, for other types of assays, the rows of the matrix might correspond
+e.g. to binding regions (with ChIP-Seq) or peptide sequences (with
+quantitative mass spectrometry). We will list methods for obtaining count matrices
+in sections below.
+
+The values in the matrix should be un-normalized counts or estimated
+counts of sequencing reads (for
+single-end RNA-seq) or fragments (for paired-end RNA-seq). 
+The [RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+describes multiple techniques for preparing such count matrices.  It
+is important to provide raw counts as input, since DESeq2's
+statistical model [@Love2014] holds only for counts, and only the count
+values allow assessing the measurement precision correctly. The DESeq2 model
+internally corrects for library size, so transformed or normalized
+values such as counts scaled by library size should not be used as
+input.
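+
+For illustration only, a toy version of such a matrix (hypothetical
+gene names, sample names, and counts) could be constructed as:
+
+```{r toyCountMatrix, eval=FALSE}
+# a toy un-normalized count matrix: genes in rows, samples in columns
+cts <- matrix(c(12L, 0L, 431L,
+                7L, 3L, 502L),
+              ncol=2,
+              dimnames=list(c("geneA","geneB","geneC"),
+                            c("sample1","sample2")))
+```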
+
+### The DESeqDataSet
+
+The object class used by the DESeq2 package to store the read counts 
+and the intermediate estimated quantities during statistical analysis
+is the *DESeqDataSet*, which will usually be represented in the code
+here as an object `dds`.
+
+A technical detail is that the *DESeqDataSet* class extends the
+*RangedSummarizedExperiment* class of the 
+[SummarizedExperiment](http://bioconductor.org/packages/SummarizedExperiment) package. 
+The "Ranged" part refers to the fact that the rows of the assay data 
+(here, the counts) can be associated with genomic ranges (the exons of genes).
+This association facilitates downstream exploration of results, making use of
+other Bioconductor packages' range-based functionality
+(e.g. find the closest ChIP-seq peaks to the differentially expressed genes).
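+
+For instance, once a *DESeqDataSet* `dds` carries ranges, they can be
+retrieved with *rowRanges* (a sketch, not evaluated here):
+
+```{r rowRangesSketch, eval=FALSE}
+rowRanges(dds)
+```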
+
+A *DESeqDataSet* object must have an associated *design formula*.
+The design formula expresses the variables which will be
+used in modeling. The formula should be a tilde (~) followed by the
+variables with plus signs between them (it will be coerced into a
+*formula* if it is not already). The design can be changed later,
+but then all differential analysis steps should be repeated,
+as the design formula is used to estimate the dispersions and 
+to estimate the log2 fold changes of the model. 
+
+*Note*: In order to benefit from the default settings of the
+package, you should put the variable of interest at the end of the
+formula and make sure the control level is the first level.
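+
+For example, two minimal design formula sketches (assuming `batch` and
+`condition` are columns of the column data; shown for illustration, not
+run):
+
+```{r designSketch, eval=FALSE}
+design(dds) <- ~ condition          # a single factor of interest
+design(dds) <- ~ batch + condition  # adjust for batch; variable of interest last
+```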
+
+We will now show 4 ways of constructing a *DESeqDataSet*, depending
+on what pipeline was used upstream of DESeq2 to generate counts or
+estimated counts:
+
+1) From [transcript abundance files and tximport](#tximport)
+2) From a [count matrix](#countmat)
+3) From [htseq-count files](#htseq)
+4) From a [SummarizedExperiment](#se) object
+
+<a name="tximport"/>
+
+### Transcript abundance files and *tximport* input
+
+A newer and recommended pipeline is to use fast transcript 
+abundance quantifiers upstream of DESeq2, and then to create
+gene-level count matrices for use with DESeq2 
+by importing the quantification data using the
+[tximport](http://bioconductor.org/packages/tximport) 
+package. This workflow allows users to import transcript abundance estimates
+from a variety of external software, including the following methods:
+
+* [Salmon](http://combine-lab.github.io/salmon/)
+  [@Patro2016Salmon]
+* [Sailfish](http://www.cs.cmu.edu/~ckingsf/software/sailfish/)
+  [@Patro2014Sailfish]
+* [kallisto](https://pachterlab.github.io/kallisto/about.html)
+  [@Bray2016Near]
+* [RSEM](http://deweylab.github.io/RSEM/)
+  [@Li2011RSEM]
+
+Some advantages of using the above methods for transcript abundance
+estimation are: 
+(i) this approach corrects for potential changes in gene length across samples 
+(e.g. from differential isoform usage) [@Trapnell2013Differential],
+(ii) some of these methods (*Salmon*, *Sailfish*, *kallisto*) 
+are substantially faster and require less memory
+and disk usage compared to alignment-based methods that require
+creation and storage of BAM files, and
+(iii) it is possible to avoid discarding those fragments that can
+align to multiple genes with homologous sequence, thus increasing
+sensitivity [@Robert2015Errors].
+
+Full details on the motivation and methods for importing transcript
+level abundance and count estimates, summarizing to gene-level count matrices 
+and producing an offset which corrects for potential changes in average
+transcript length across samples are described in [@Soneson2015].
+Note that the tximport-to-DESeq2 approach uses *estimated* gene
+counts from the transcript abundance quantifiers, but not *normalized*
+counts.
+
+Here, we demonstrate how to import transcript abundances
+and construct a gene-level *DESeqDataSet* object
+from *Salmon* `quant.sf` files, which are
+stored in the [tximportData](http://bioconductor.org/packages/tximportData) package.
+You do not need the `tximportData` package for your own analysis; it
+is only used here for demonstration.
+
+Note that, instead of locating `dir` using *system.file*,
+a user would typically just provide a path, e.g. `/path/to/quant/files`.
+For a typical use, the `condition` information should already be
+present as a column of the sample table `samples`, while here we
+construct artificial condition labels for demonstration.
+
+
+```{r txiSetup}
+library("tximport")
+library("readr")
+library("tximportData")
+dir <- system.file("extdata", package="tximportData")
+samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
+samples$condition <- factor(rep(c("A","B"),each=3))
+rownames(samples) <- samples$run
+samples[,c("pop","center","run","condition")]
+```
+
+Next we specify the path to the files using the appropriate columns of
+`samples`, and we read in a table that links transcripts to genes for
+this dataset.
+
+```{r txiFiles}
+files <- file.path(dir,"salmon", samples$run, "quant.sf")
+names(files) <- samples$run
+tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
+```
+
+We import the necessary quantification data for DESeq2 using the
+*tximport* function.  For further details on use of *tximport*,
+including the construction of the `tx2gene` table for linking
+transcripts to genes in your dataset, please refer to the 
+[tximport](http://bioconductor.org/packages/tximport) package vignette.
+
+```{r tximport, results="hide"}
+txi <- tximport(files, type="salmon", tx2gene=tx2gene)
+```
+
+Finally, we can construct a *DESeqDataSet* from the `txi` object and
+sample information in `samples`.
+
+```{r txi2dds, results="hide"}
+library("DESeq2")
+ddsTxi <- DESeqDataSetFromTximport(txi,
+                                   colData = samples,
+                                   design = ~ condition)
+```
+
+The `ddsTxi` object here can then be used as `dds` in the
+following analysis steps.
+
+<a name="countmat"/>
+
+### Count matrix input
+
+Alternatively, the function *DESeqDataSetFromMatrix* can be
+used if you already have a matrix of read counts prepared from another
+source. Another method for quickly producing count matrices 
+from alignment files is the *featureCounts* function [@Liao2013feature]
+in the [Rsubread](http://bioconductor.org/packages/Rsubread) package.
+To use *DESeqDataSetFromMatrix*, the user should provide 
+the counts matrix, the information about the samples (the columns of the 
+count matrix) as a *DataFrame* or *data.frame*, and the design formula.
+
+To demonstrate the use of *DESeqDataSetFromMatrix*,
+we will read in count data from the
+[pasilla](http://bioconductor.org/packages/pasilla) package. 
+We read in a count matrix, which we will name `cts`, 
+and the sample information table, which we will name `coldata`. 
+Further below we describe how to extract these objects from,
+e.g. *featureCounts* output. 
+
+```{r loadPasilla}
+library("pasilla")
+pasCts <- system.file("extdata",
+                      "pasilla_gene_counts.tsv",
+                      package="pasilla", mustWork=TRUE)
+pasAnno <- system.file("extdata",
+                       "pasilla_sample_annotation.csv",
+                       package="pasilla", mustWork=TRUE)
+cts <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
+coldata <- read.csv(pasAnno, row.names=1)
+coldata <- coldata[,c("condition","type")]
+```
+
+We examine the count matrix and column data to see if they are consistent:
+
+```{r showPasilla}
+head(cts)
+head(coldata)
+```
+
+Note that these are not in the same order with respect to samples! 
+
+It is critical that the columns of the count matrix and the rows of
+the column data (information about samples) are in the same order.
+We should re-arrange one or the other so that they are consistent in
+terms of sample order (if we do not, later functions would produce
+an error). We additionally need to chop off the `"fb"` of the 
+row names of `coldata`, so the naming is consistent.
+
+```{r reorderPasilla}
+rownames(coldata) <- sub("fb","",rownames(coldata))
+all(rownames(coldata) %in% colnames(cts))
+cts <- cts[, rownames(coldata)]
+all(rownames(coldata) == colnames(cts))
+```
+
+If you have used the *featureCounts* function [@Liao2013feature] in the 
+[Rsubread](http://bioconductor.org/packages/Rsubread) package, the matrix of read counts can be directly 
+provided from the `"counts"` element in the list output.
+The count matrix and column data can typically be read into R 
+from flat files using base R functions such as *read.csv*
+or *read.delim*. For *htseq-count* files, see the dedicated input
+function below. 
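+
+For example, a minimal sketch (not run; `fc` stands for a hypothetical
+object returned by *featureCounts*):
+
+```{r featureCountsSketch, eval=FALSE}
+# fc is the list returned by featureCounts; its "counts" element
+# is the integer matrix that DESeqDataSetFromMatrix expects
+cts <- fc$counts
+```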
+
+With the count matrix, `cts`, and the sample
+information, `coldata`, we can construct a *DESeqDataSet*:
+
+```{r matrixInput}
+library("DESeq2")
+dds <- DESeqDataSetFromMatrix(countData = cts,
+                              colData = coldata,
+                              design = ~ condition)
+dds
+```
+
+If you have additional feature data, it can be added to the
+*DESeqDataSet* by adding to the metadata columns of a newly
+constructed object. (Here we add redundant data just for demonstration, as
+the gene names are already the rownames of the `dds`.)
+
+```{r addFeatureData}
+featureData <- data.frame(gene=rownames(cts))
+mcols(dds) <- DataFrame(mcols(dds), featureData)
+mcols(dds)
+```
+
+<a name="htseq"/>
+
+### *htseq-count* input
+
+You can use the function *DESeqDataSetFromHTSeqCount* if you
+have used *htseq-count* from the 
+[HTSeq](http://www-huber.embl.de/users/anders/HTSeq) 
+python package [@Anders:2014:htseq].
+For an example of using the python scripts, see the
+[pasilla](http://bioconductor.org/packages/pasilla) data package. First you will want to specify a
+variable which points to the directory in which the *htseq-count*
+output files are located. 
+
+```{r htseqDirI, eval=FALSE}
+directory <- "/path/to/your/files/"
+```
+
+However, for demonstration purposes only, the following line of
+code points to the directory for the demo *htseq-count* output
+files packaged with the [pasilla](http://bioconductor.org/packages/pasilla) package.
+
+```{r htseqDirII}
+directory <- system.file("extdata", package="pasilla",
+                         mustWork=TRUE)
+```
+
+We specify which files to read in using *list.files*,
+and select those files which contain the string `"treated"`
+using *grep*. The *sub* function is used to 
+chop up the sample filename to obtain the condition status, or 
+you might alternatively read in a phenotypic table 
+using *read.table*.
+
+```{r htseqInput}
+sampleFiles <- grep("treated",list.files(directory),value=TRUE)
+sampleCondition <- sub("(.*treated).*","\\1",sampleFiles)
+sampleTable <- data.frame(sampleName = sampleFiles,
+                          fileName = sampleFiles,
+                          condition = sampleCondition)
+```
+
+Then we build the *DESeqDataSet* using the following function:
+
+```{r htseqDds}
+library("DESeq2")
+ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
+                                       directory = directory,
+                                       design= ~ condition)
+ddsHTSeq
+```
+
+<a name="se"/>
+
+### *SummarizedExperiment* input
+
+An example of the steps to produce a *RangedSummarizedExperiment* can
+be found in the [RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+and in the vignette for the data package [airway](http://bioconductor.org/packages/airway).
+Here we load the *RangedSummarizedExperiment* from that package in
+order to build a *DESeqDataSet*.
+
+```{r loadSumExp}
+library("airway")
+data("airway")
+se <- airway
+```
+The constructor function below shows the generation of a
+*DESeqDataSet* from a *RangedSummarizedExperiment* `se`.
+
+```{r sumExpInput}
+library("DESeq2")
+ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
+ddsSE
+```
+
+### Pre-filtering
+
+While it is not necessary to pre-filter low count genes before running the DESeq2
+functions, there are two reasons which make pre-filtering useful:
+by removing rows in which there are no reads or nearly no reads,
+we reduce the memory size of the `dds` data object and 
+we increase the speed of the transformation
+and testing functions within DESeq2. Here we perform a minimal
+pre-filtering to remove rows that have only 0 or 1 read. Note that more strict
+filtering to increase power is *automatically* 
+applied via [independent filtering](#indfilt) on the mean of
+normalized counts within the *results* function. 
+
+```{r prefilter}
+dds <- dds[ rowSums(counts(dds)) > 1, ]
+``` 
+
+### Note on factor levels 
+
+By default, R will choose a *reference level* for factors based on
+alphabetical order. Then, if you never tell the DESeq2 functions which
+level you want to compare against (e.g. which level represents the
+control group), the comparisons will be based on the alphabetical
+order of the levels. There are two solutions: you can either
+explicitly tell *results* which comparison to make using the
+`contrast` argument (this will be shown later), or you can explicitly
+set the factor levels. Setting the factor levels can be done in two
+ways, either using factor:
+
+```{r factorlvl}
+dds$condition <- factor(dds$condition, levels=c("untreated","treated"))
+``` 
+
+...or using *relevel*, just specifying the reference level:
+
+```{r relevel}
+dds$condition <- relevel(dds$condition, ref="untreated")
+``` 
+
+If you need to subset the columns of a *DESeqDataSet*,
+i.e., when removing certain samples from the analysis, it is possible
+that all the samples for one or more levels of a variable in the design
+formula would be removed. In this case, the *droplevels* function can be used
+to remove those levels which do not have samples in the current *DESeqDataSet*:
+
+```{r droplevels}
+dds$condition <- droplevels(dds$condition)
+``` 
+
+### Collapsing technical replicates
+
+DESeq2 provides a function *collapseReplicates* which can
+assist in combining the counts from technical replicates into single
+columns of the count matrix. The term *technical replicate* 
+implies multiple sequencing runs of the same library. 
+You should not collapse biological replicates using this function.
+See the manual page for an example of the use of
+*collapseReplicates*.
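+
+A minimal sketch of its use (the columns `sample` and `run` here are
+hypothetical; not run):
+
+```{r collapseSketch, eval=FALSE}
+# dds$sample identifies the biological sample,
+# dds$run the sequencing run (technical replicate)
+ddsColl <- collapseReplicates(dds, groupby=dds$sample, run=dds$run)
+```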
+
+### About the pasilla dataset
+
+We continue with the [pasilla](http://bioconductor.org/packages/pasilla) data constructed from the
+count matrix method above. This data set is from an experiment on
+*Drosophila melanogaster* cell cultures that investigated the
+effect of RNAi knock-down of the splicing factor *pasilla*
+[@Brooks2010].  A detailed account of the production of the
+[pasilla](http://bioconductor.org/packages/pasilla) data is provided in the vignette of that
+data package.
+
+<a name="de"/>
+
+## Differential expression analysis 
+
+The standard differential expression analysis steps are wrapped
+into a single function, *DESeq*. The estimation steps performed
+by this function are described [below](#theory), in the manual page for
+`?DESeq` and in the Methods section of the DESeq2 publication [@Love2014]. 
+
+Results tables are generated using the function *results*, which
+extracts a results table with log2 fold changes, *p* values and adjusted
+*p* values. With no additional arguments to *results*, the log2 fold change and
+Wald test *p* value will be for the last variable in the design
+formula, and if this is a factor,
+the comparison will be the last level of this variable over the first
+level. However, the order of the variables of the design does not matter
+so long as the user specifies the comparison using the `name` or
+`contrast` arguments of *results* (described later and in `?results`).
+
+Details about the comparison are printed to the console, above the
+results table. The text, `condition treated vs untreated`, tells you that the
+estimates are of the logarithmic fold change log2(treated/untreated).
+
+```{r deseq}
+dds <- DESeq(dds)
+res <- results(dds)
+res
+``` 
+
+<a name="lfcShrink"/>
+
+In previous versions of DESeq2, the *DESeq* function by default 
+would produce moderated, or shrunken, log2 fold changes through the
+use of the `betaPrior` argument. In version 1.16 and higher, we have
+split the moderation of log2 fold changes into a separate function,
+*lfcShrink*, for reasons described in the [changes section](#changes)
+below. 
+
+Here we provide the `dds` object and the number of the
+coefficient we want to moderate. It is also possible to specify a
+`contrast`, instead of `coef`, which works the same as the `contrast`
+argument of the *results* function.
+If a results object is provided, the `log2FoldChange` column will be 
+swapped out; otherwise *lfcShrink* returns a vector of shrunken log2
+fold changes.
+
+```{r lfcShrink}
+resultsNames(dds)
+resLFC <- lfcShrink(dds, coef=2, res=res)
+resLFC
+```
+
+The above steps should take less than 30 seconds for most analyses. For
+experiments with many samples (e.g. 100 samples), one can take
+advantage of parallelized computation.  Both of the above functions
+have an argument `parallel` which, if set to `TRUE`, can
+be used to distribute computation across cores specified by the
+*register* function of [BiocParallel](http://bioconductor.org/packages/BiocParallel). For example,
+the following chunk (not evaluated here) would register 4 cores, and
+then the two functions above, with `parallel=TRUE`, would
+split computation over these cores.
+
+```{r parallel, eval=FALSE}
+library("BiocParallel")
+register(MulticoreParam(4))
+```
+
+We can order our results table by the smallest adjusted *p* value:
+
+```{r resOrder}
+resOrdered <- res[order(res$padj),]
+```
+
+We can summarize some basic tallies using the
+*summary* function.
+
+```{r sumRes}
+summary(res)
+``` 
+
+How many adjusted p-values were less than 0.1?
+
+```{r sumRes01}
+sum(res$padj < 0.1, na.rm=TRUE)
+``` 
+
+The *results* function contains a number of arguments to
+customize the results table which is generated. You can read about
+these arguments by looking up `?results`.
+Note that the *results* function automatically performs independent
+filtering based on the mean of normalized counts for each gene,
+optimizing the number of genes which will have an adjusted *p* value
+below a given FDR cutoff, `alpha`.
+Independent filtering is further discussed [below](#indfilt).
+By default the argument `alpha` is set to $0.1$.  If you plan to use an
+adjusted *p* value cutoff other than $0.1$, `alpha` should be set to
+that value:
+
+```{r resAlpha05}
+res05 <- results(dds, alpha=0.05)
+summary(res05)
+sum(res05$padj < 0.05, na.rm=TRUE)
+``` 
+
+<a name="IHW"/>
+
+A generalization of the idea of *p* value filtering is to *weight* hypotheses
+to optimize power. A Bioconductor package, [IHW](http://bioconductor.org/packages/IHW), is available
+that implements the method of *Independent Hypothesis Weighting* [@Ignatiadis2015].
+Here we show the use of *IHW* for *p* value adjustment of DESeq2 results.
+For more details, please see the vignette of the [IHW](http://bioconductor.org/packages/IHW) package.
+Note that the *IHW* result object is stored in the metadata.
+
+```{r IHW}
+library("IHW")
+resIHW <- results(dds, filterFun=ihw)
+summary(resIHW)
+sum(resIHW$padj < 0.1, na.rm=TRUE)
+metadata(resIHW)$ihwResult
+``` 
+
+If a multi-factor design is used, or if the variable in the design
+formula has more than two levels, the `contrast` argument of
+*results* can be used to extract different comparisons from
+the *DESeqDataSet* returned by *DESeq*.
+The use of the `contrast` argument is further discussed [below](#contrasts).
+
+For advanced users, note that all the values calculated by the DESeq2 
+package are stored in the *DESeqDataSet* object, and access 
+to these values is discussed [below](#access).
+
+## Exploring and exporting results
+
+### MA-plot
+
+In DESeq2, the function *plotMA* shows the log2
+fold changes attributable to a given variable over the mean of
+normalized counts for all the samples in the *DESeqDataSet*.
+Points will be colored red if the adjusted *p* value is less than 0.1.
+Points which fall out of the window are plotted as open triangles pointing 
+either up or down.
+
+```{r MA}
+plotMA(res, ylim=c(-2,2))
+```
+
+It is also useful to visualize the MA-plot for the shrunken log2 fold
+changes, which remove the noise associated with log2 fold changes from
+low count genes without requiring arbitrary filtering thresholds.
+
+```{r shrunkMA}
+plotMA(resLFC, ylim=c(-2,2))
+```
+
+After calling *plotMA*, one can use the function
+*identify* to interactively detect the row number of
+individual genes by clicking on the plot. One can then recover
+the gene identifiers by saving the resulting indices:
+
+```{r MAidentify, eval=FALSE}
+idx <- identify(res$baseMean, res$log2FoldChange)
+rownames(res)[idx]
+``` 
+
+### Plot counts 
+
+It can also be useful to examine the counts of reads for a single gene
+across the groups. A simple function for making this
+plot is *plotCounts*, which normalizes counts by sequencing depth
+and adds a pseudocount of 1/2 to allow for log scale plotting.
+The counts are grouped by the variables in `intgroup`, where
+more than one variable can be specified. Here we specify the gene
+which had the smallest *p* value from the results table created
+above. You can select the gene to plot by rowname or by numeric index.
+
+```{r plotCounts}
+plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
+``` 
+
+For customized plotting, an argument `returnData` specifies
+that the function should only return a *data.frame* for
+plotting with *ggplot*.
+
+```{r plotCountsAdv}
+d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition", 
+                returnData=TRUE)
+library("ggplot2")
+ggplot(d, aes(x=condition, y=count)) + 
+  geom_point(position=position_jitter(w=0.1,h=0)) + 
+  scale_y_log10(breaks=c(25,100,400))
+``` 
+
+### More information on results columns 
+
+Information about which variables and tests were used can be found by calling
+the function *mcols* on the results object.
+
+```{r metadata}
+mcols(res)$description
+```
+
+For a particular gene, a log2 fold change of -1 for
+`condition treated vs untreated` means that the treatment
+induces a multiplicative change in observed gene expression level of
+$2^{-1} = 0.5$ compared to the untreated condition. If the variable of
+interest is continuous-valued, then the reported log2 fold change is
+per unit of change of that variable.
+
+<a name="pvaluesNA"/>
+
+**Note on p-values set to NA**: some values in the results table
+can be set to `NA` for one of the following reasons:
+
+* If within a row, all samples have zero counts, 
+  the `baseMean` column will be zero, and the
+  log2 fold change estimates, *p* value and adjusted *p* value
+  will all be set to `NA`.
+* If a row contains a sample with an extreme count outlier
+  then the *p* value and adjusted *p* value will be set to `NA`.
+  These outlier counts are detected by Cook's distance. Customization
+  of this outlier filtering and description of functionality for 
+  replacement of outlier counts and refitting is described 
+  [below](#outlier).
+* If a row is filtered by automatic independent filtering, 
+  for having a low mean normalized count, then only the adjusted *p*
+  value will be set to `NA`. 
+  Description and customization of independent filtering is 
+  described [below](#indfilt).
+
+### Rich visualization and reporting of results
+
+**ReportingTools.** An HTML report of the results with plots and sortable/filterable columns
+can be generated using the [ReportingTools](http://bioconductor.org/packages/ReportingTools) package
+on a *DESeqDataSet* that has been processed by the *DESeq* function.
+For a code example, see the *RNA-seq differential expression* vignette at
+the [ReportingTools](http://bioconductor.org/packages/ReportingTools) page, or the manual page for the 
+*publish* method for the *DESeqDataSet* class.
+
+**regionReport.** An HTML and PDF summary of the results with plots
+can also be generated using the [regionReport](http://bioconductor.org/packages/regionReport) package.
+The *DESeq2Report* function should be run on a 
+*DESeqDataSet* that has been processed by the *DESeq* function.
+For more details see the manual page for *DESeq2Report* 
+and an example vignette in the [regionReport](http://bioconductor.org/packages/regionReport) package.
+
+**Glimma.** Interactive visualization of DESeq2 output, 
+including MA-plots (also called MD-plots), can be generated using the
+[Glimma](http://bioconductor.org/packages/Glimma) package. See the manual page for *glMDPlot.DESeqResults*.
+
+**pcaExplorer.** Interactive visualization of DESeq2 output,
+including PCA plots, boxplots of counts and other useful summaries can be
+generated using the [pcaExplorer](http://bioconductor.org/packages/pcaExplorer) package.
+See the *Launching the application* section of the package vignette.
+
+### Exporting results to CSV files
+
+A plain-text file of the results can be exported using the 
+base R functions *write.csv* or *write.table*. 
+We suggest using a descriptive file name indicating the variable
+and levels which were tested.
+
+```{r export, eval=FALSE}
+write.csv(as.data.frame(resOrdered), 
+          file="condition_treated_results.csv")
+```
+
+Exporting only the results which pass an adjusted *p* value
+threshold can be accomplished with the *subset* function,
+followed by the *write.csv* function.
+
+```{r subset}
+resSig <- subset(resOrdered, padj < 0.1)
+resSig
+``` 
+
+## Multi-factor designs
+
+Experiments with more than one factor influencing the counts can be
+analyzed using design formulas that include the additional variables.
+In fact, DESeq2 can analyze any experimental design that can
+be expressed with fixed-effects terms (multiple factors, designs with
+interactions, designs with continuous variables, splines, and so on),
+as sketched below.
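+
+As an illustration, a few possible designs are sketched here
+(the variables in these formulas are hypothetical columns of the
+column data, not part of the dataset used in this vignette):
+
+```{r designSketches, eval=FALSE}
+design(dds) <- ~ batch + condition        # control for batch
+design(dds) <- ~ genotype + condition + genotype:condition # interaction
+design(dds) <- ~ age + condition          # continuous covariate
+```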
+
+By adding variables to the design, one can control for additional variation
+in the counts. For example, if the condition samples are balanced
+across experimental batches, including the `batch` factor in the
+design can increase the sensitivity for finding differences due
+to `condition`. There are multiple ways to analyze experiments when the
+additional variables are of interest and not just controlling factors 
+(see [section on interactions](#interactions)).
+
+The data in the [pasilla](http://bioconductor.org/packages/pasilla) 
+package have a condition of interest 
+(the column `condition`), as well as information on the type of sequencing 
+which was performed (the column `type`), as we can see below:
+
+```{r multifactor}
+colData(dds)
+```
+
+We create a copy of the *DESeqDataSet*, so that we can rerun
+the analysis using a multi-factor design.
+
+```{r copyMultifactor}
+ddsMF <- dds
+```
+
+We can account for the different types of sequencing, and get a clearer picture
+of the differences attributable to the treatment.  As `condition` is the
+variable of interest, we put it at the end of the formula. Thus the *results*
+function will by default pull the `condition` results unless 
+`contrast` or `name` arguments are specified. 
+Then we can re-run *DESeq*:
+
+```{r replaceDesign}
+design(ddsMF) <- formula(~ type + condition)
+ddsMF <- DESeq(ddsMF)
+```
+
+Again, we access the results using the *results* function.
+
+```{r multiResults}
+resMF <- results(ddsMF)
+head(resMF)
+```
+
+It is also possible to retrieve the log2 fold changes, *p* values and adjusted
+*p* values of the `type` variable. The `contrast` argument of 
+the function *results* takes a character vector of length three:
+the name of the variable, the name of the factor level for the numerator
+of the log2 ratio, and the name of the factor level for the denominator.
+The `contrast` argument can also take other forms, as
+described in the help page for *results* and [below](#contrasts).
+
+```{r multiTypeResults}
+resMFType <- results(ddsMF,
+                     contrast=c("type", "single-read", "paired-end"))
+head(resMFType)
+```
+
+If the variable is continuous or an interaction term
+(see [section on interactions](#interactions))
+then the results can be extracted using the `name` argument to *results*,
+where the name is one of the elements returned by `resultsNames(dds)`.
+
+<a name="transform"/>
+
+# Data transformations and visualization 
+
+## Count data transformations
+
+In order to test for differential expression, we operate on raw counts
+and use discrete distributions as described in the previous section on
+differential expression.
+However for other downstream analyses --
+e.g. for visualization or clustering -- it might be useful 
+to work with transformed versions of the count data. 
+
+Perhaps the most obvious choice of transformation is the logarithm.
+Since count values for a gene can be zero in some
+conditions (and non-zero in others), some advocate the use of
+*pseudocounts*, i.e. transformations of the form:
+
+$$ y = \log_2(n + n_0) $$
+
+where *n* represents the count values and $n_0$ is a positive constant.
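+
+For example, with $n_0 = 1$ and counts normalized for library size,
+the shifted logarithm can be computed directly; this is a sketch of
+the same calculation performed by *normTransform*, shown further below:
+
+```{r shiftedLogSketch, eval=FALSE}
+log2(counts(dds, normalized=TRUE) + 1)
+```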
+
+In this section, we discuss two alternative
+approaches that offer more theoretical justification and a rational way
+of choosing the parameter equivalent to $n_0$ above.
+The *regularized logarithm* or *rlog* incorporates a prior on
+the sample differences [@Love2014], 
+and the other uses the concept of variance stabilizing
+transformations (VST) [@Tibshirani1988; @sagmb2003; @Anders:2010:GB].
+Both transformations produce transformed data on the log2 scale
+which has been normalized with respect to library size.
+
+The point of these two transformations, the *rlog* and the VST,
+is to remove the dependence of the variance on the mean,
+particularly the high variance of the logarithm of count data when the
+mean is low. Both *rlog* and VST use the experiment-wide trend
+of variance over mean, in order to transform the data to remove the
+experiment-wide trend. Note that we do not require or
+desire that all the genes have *exactly* the same variance after
+transformation. Indeed, in a figure below, you will see
+that after the transformations the genes with the same mean do not
+have exactly the same standard deviations, but that the
+experiment-wide trend has flattened. It is those genes with row
+variance above the trend which will allow us to cluster samples into
+interesting groups.
+
+**Note on running time:** if you have many samples (e.g. 100s),
+the *rlog* function might take too long, and so the *vst* function
+will be a faster choice. 
+The rlog and VST have similar properties, but the rlog requires
+fitting a shrinkage term for each sample and each gene which takes
+time.  See the DESeq2 paper for more discussion on the differences
+[@Love2014].
+
+### Blind dispersion estimation
+
+The two functions, *rlog* and *vst*, have an argument
+`blind`, which controls whether the transformation should be blind to the
+sample information specified by the design formula. When
+`blind` equals `TRUE` (the default), the functions
+will re-estimate the dispersions using only an intercept.
+This setting should be used in order to compare
+samples in a manner wholly unbiased by the information about
+experimental groups, for example to perform sample QA (quality
+assurance) as demonstrated below.
+
+However, blind dispersion estimation is not the appropriate choice if
+one expects that many or the majority of genes (rows) will have large
+differences in counts which are explainable by the experimental design,
+and one wishes to transform the data for downstream analysis. In this
+case, using blind dispersion estimation will lead to large estimates
+of dispersion, as it treats differences due to the experimental design
+as unwanted *noise*, and will result in overly shrinking the transformed
+values towards each other. 
+By setting `blind` to `FALSE`, the dispersions
+already estimated will be used to perform transformations, or if not
+present, they will be estimated using the current design formula. Note
+that only the fitted dispersion estimates from the mean-dispersion trend
+line are used in the transformation (the global dependence of
+dispersion on mean for the entire experiment).
+So setting `blind` to `FALSE` is still for the most
+part not using the information about which samples were in which
+experimental group in applying the transformation.
+
+### Extracting transformed values
+
+These transformation functions return an object of class *DESeqTransform*
+which is a subclass of *RangedSummarizedExperiment*. 
+For ~20 samples, running on a newly created `DESeqDataSet`,
+*rlog* may take 30 seconds, 
+*varianceStabilizingTransformation* may take 5 seconds, and
+*vst* less than 1 second (by subsetting to 1000 genes for
+calculating the global dispersion trend).
+However, the running times are shorter and more similar with `blind=FALSE` and
+if the function *DESeq* has already been run, because then
+it is not necessary to re-estimate the dispersion values.
+The *assay* function is used to extract the matrix of normalized values.
+
+```{r rlogAndVST}
+rld <- rlog(dds, blind=FALSE)
+vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+vsd.fast <- vst(dds, blind=FALSE)
+head(assay(rld), 3)
+```
+
+### Regularized log transformation
+
+The function *rlog*, which stands for *regularized log*,
+transforms the original count data to the log2 scale by fitting a
+model with a term for each sample and a prior distribution on the
+coefficients which is estimated from the data. This is the same kind
+of shrinkage (sometimes referred to as regularization, or moderation)
+of log fold changes used by *DESeq* and
+*nbinomWaldTest*. The resulting data contains elements defined as:
+
+$$ \log_2(q_{ij}) = \beta_{i0} + \beta_{ij} $$
+
+where $q_{ij}$ is a parameter proportional to the expected true
+concentration of fragments for gene *i* and sample *j* (see
+formula [below](#theory)), $\beta_{i0}$ is an intercept which does not
+undergo shrinkage, and $\beta_{ij}$ is the sample-specific effect
+which is shrunk toward zero based on the dispersion-mean trend over
+the entire dataset. The trend typically captures high dispersions for
+low counts, and therefore these genes exhibit higher shrinkage from
+the *rlog*.
+
+Note that, as $q_{ij}$ represents the part of the mean value
+$\mu_{ij}$ after the size factor $s_j$ has been divided out, it is
+clear that the rlog transformation inherently accounts for differences
+in sequencing depth. Without priors, this design matrix would lead to
+a non-unique solution, however the addition of a prior on
+non-intercept betas allows for a unique solution to be found. 
+
+### Variance stabilizing transformation
+
+Above, we used a parametric fit for the dispersion. In this case, the
+closed-form expression for the variance stabilizing transformation is
+used by *varianceStabilizingTransformation*, which is
+derived in the file `vst.pdf`, that is distributed in the
+package alongside this vignette. If a local fit is used (option
+`fitType="locfit"` to *estimateDispersions*) a numerical integration
+is used instead. 
+
+### Effects of transformations on the variance
+
+The figure below plots the standard deviation of the transformed data,
+across samples, against the mean, using the shifted logarithm
+transformation, the regularized log transformation and the variance
+stabilizing transformation.  The shifted logarithm has elevated
+standard deviation in the lower count range, and the regularized log
+to a lesser extent, while for the variance stabilized data the
+standard deviation is roughly constant along the whole dynamic range.
+
+Note that the vertical axis in such plots is the square root of the
+variance over all samples, and so includes the variance due to the
+experimental conditions.  While a flat curve of the square root of
+variance over the mean may seem like the goal of such transformations,
+this may be unreasonable in the case of datasets with many true
+differences due to the experimental conditions.
+
+```{r meansd}
+# this gives log2(n + 1)
+ntd <- normTransform(dds)
+library("vsn")
+notAllZero <- (rowSums(counts(dds))>0)
+meanSdPlot(assay(ntd)[notAllZero,])
+meanSdPlot(assay(rld)[notAllZero,])
+meanSdPlot(assay(vsd)[notAllZero,])
+```
+
+## Data quality assessment by sample clustering and visualization
+
+Data quality assessment and quality control (i.e. the removal of
+insufficiently good data) are essential steps of any data
+analysis. These steps should typically be performed 
+very early in the analysis of a new data set,
+preceding or in parallel to the differential expression testing.
+
+We define the term *quality* as *fitness for purpose*.
+Our purpose is the detection of differentially expressed genes, and we
+are looking in particular for samples whose experimental treatment
+suffered from an abnormality that renders the data points obtained from
+these particular samples detrimental to our purpose.
+
+### Heatmap of the count matrix
+
+To explore a count matrix, it is often instructive to look at it as a
+heatmap. Below we show how to produce such a heatmap for various
+transformations of the data. 
+
+```{r heatmap}
+library("pheatmap")
+select <- order(rowMeans(counts(dds,normalized=TRUE)),
+                decreasing=TRUE)[1:20]
+df <- as.data.frame(colData(dds)[,c("condition","type")])
+pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+```
+
+### Heatmap of the sample-to-sample distances
+
+Another use of the transformed data is sample clustering. Here, we apply the
+*dist* function to the transpose of the transformed count matrix to get
+sample-to-sample distances. We could alternatively use the variance stabilized
+transformation here.
+
+```{r sampleClust}
+sampleDists <- dist(t(assay(rld)))
+```
+
+A heatmap of this distance matrix gives us an overview of the similarities
+and dissimilarities between samples.
+We provide the sample distances to the `clustering_distance`
+arguments of the *pheatmap* function; otherwise, the heatmap
+function would calculate a clustering based on the distances between
+the rows/columns of the distance matrix.
+
+```{r figHeatmapSamples}
+library("RColorBrewer")
+sampleDistMatrix <- as.matrix(sampleDists)
+rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
+colnames(sampleDistMatrix) <- NULL
+colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
+pheatmap(sampleDistMatrix,
+         clustering_distance_rows=sampleDists,
+         clustering_distance_cols=sampleDists,
+         col=colors)
+```
+
+### Principal component plot of the samples
+
+Related to the distance matrix is the PCA plot, which shows 
+the samples in the 2D plane spanned by their first two principal
+components. This type of plot is useful for visualizing the overall
+effect of experimental covariates and batch effects.
+
+```{r figPCA}
+plotPCA(rld, intgroup=c("condition", "type"))
+```
+
+It is also possible to customize the PCA plot using the
+*ggplot* function.
+
+```{r figPCA2}
+pcaData <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
+percentVar <- round(100 * attr(pcaData, "percentVar"))
+ggplot(pcaData, aes(PC1, PC2, color=condition, shape=type)) +
+  geom_point(size=3) +
+  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
+  ylab(paste0("PC2: ",percentVar[2],"% variance")) + 
+  coord_fixed()
+```
+
+# Variations to the standard workflow
+
+## Wald test individual steps 
+
+The function *DESeq* runs the following functions in order:
+
+```{r WaldTest, eval=FALSE}
+dds <- estimateSizeFactors(dds)
+dds <- estimateDispersions(dds)
+dds <- nbinomWaldTest(dds)
+```
+
+<a name="contrasts"/>
+
+## Contrasts 
+
+A contrast is a linear combination of estimated log2 fold changes,
+which can be used to test if differences between groups are equal to
+zero.  The simplest use case for contrasts is an experimental design
+containing a factor with three levels, say A, B and C.  Contrasts
+enable the user to generate results for all 3 possible differences:
+log2 fold change of B vs A, of C vs A, and of C vs B.
+The `contrast` argument of the *results* function is
+used to extract test results of log2 fold changes of interest, for example:
+
+```{r simpleContrast, eval=FALSE}
+results(dds, contrast=c("condition","C","B"))
+``` 
+
+Log2 fold changes can also be added and subtracted by providing a
+`list` to the `contrast` argument which has two elements:
+the names of the log2 fold changes to add, and the names of the log2
+fold changes to subtract. The names used in the list should come from
+`resultsNames(dds)`.
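+
+For example, with the three-level factor above, the C vs B comparison
+can be formed by subtracting two fold changes. This is a sketch: the
+coefficient names are hypothetical and should be checked against
+`resultsNames(dds)`.
+
+```{r listContrastSketch, eval=FALSE}
+results(dds, contrast=list("condition_C_vs_A", "condition_B_vs_A"))
+```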
+
+Alternatively, a numeric vector of the
+length of `resultsNames(dds)` can be provided, for manually
+specifying the linear combination of terms.  Demonstrations of the use
+of contrasts for various designs can be found in the examples section
+of the help page for the *results* function. The
+mathematical formula that is used to generate the contrasts can be
+found [below](#theory).
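+
+Assuming, for the sketch below, that `resultsNames(dds)` returns
+`c("Intercept", "condition_B_vs_A", "condition_C_vs_A")`, the numeric
+form of the same C vs B contrast would be:
+
+```{r numericContrastSketch, eval=FALSE}
+results(dds, contrast=c(0, -1, 1))
+```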
+
+<a name="interactions"/>
+
+## Interactions 
+
+Interaction terms can be added to the design formula, in order to
+test, for example, if the log2 fold change attributable to a given
+condition is *different* based on another factor, for example if the
+condition effect differs across genotype.
+
+Many users begin to add interaction terms to the design formula, when
+in fact a much simpler approach would give all the results tables that
+are desired. We will explain this approach first, because it is much
+simpler to perform.
+If the comparisons of interest are, for example, the effect
+of a condition for different sets of samples, a simpler approach than
+adding interaction terms explicitly to the design formula is to
+perform the following steps:
+
+* combine the factors of interest into a single factor with all
+  combinations of the original factors 
+* change the design to include just this factor, e.g. ~ group
+
+Using this design is similar to adding an interaction term, 
+in that it models multiple condition effects which
+can be easily extracted with *results*.
+Suppose we have two factors `genotype` (with values I, II, and III) 
+and `condition` (with values A and B), and we want to extract 
+the condition effect specifically for each genotype. We could use the
+following approach to obtain, e.g. the condition effect for genotype I: 
+
+```{r combineFactors, eval=FALSE}
+dds$group <- factor(paste0(dds$genotype, dds$condition))
+design(dds) <- ~ group
+dds <- DESeq(dds)
+resultsNames(dds)
+results(dds, contrast=c("group", "IB", "IA"))
+```
+
+The following two plots illustrate hypothetical genotype-specific
+condition effects, which could be modeled with interaction terms by
+using a design of `~genotype + condition + genotype:condition`.
+
+In the first plot (Gene 1), note that the condition effect
+is consistent across genotypes. Although condition A has a different
+baseline for I,II, and III, the condition effect is a log2 fold
+change of about 2 for each genotype.  Using a model with an
+interaction term `genotype:condition`, the interaction terms for
+genotype II and genotype III will be nearly 0.
+
+Here, the y-axis represents log2(n+1), and each
+group has 20 samples (black dots). A red line connects the mean of the
+groups within each genotype. 
+
+```{r interFig, echo=FALSE, results="hide"}
+npg <- 20
+mu <- 2^c(8,10,9,11,10,12)
+cond <- rep(rep(c("A","B"),each=npg),3)
+geno <- rep(c("I","II","III"),each=2*npg)
+table(cond, geno)
+counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
+d <- data.frame(log2c=log2(counts+1), cond, geno)
+library(ggplot2)
+plotit <- function(d, title) {
+  ggplot(d, aes(x=cond, y=log2c, group=geno)) + 
+    geom_jitter(size=1.5, position = position_jitter(width=.15)) +
+    facet_wrap(~ geno) + 
+    stat_summary(fun.y=mean, geom="line", colour="red", size=0.8) + 
+    xlab("condition") + ylab("log2(counts+1)") + ggtitle(title)
+}
+plotit(d, "Gene 1") + ylim(7,13)
+lm(log2c ~ cond + geno + geno:cond, data=d)
+``` 
+
+In the second plot
+(Gene 2), we can see that the condition effect is not consistent
+across genotype. Here the main condition effect (the effect for the
+reference genotype I) is again 2. However, this time the interaction
+terms will be around 1 for genotype II and -4 for genotype III. This
+is because the condition effect is higher by 1 for genotype II
+compared to genotype I, and lower by 4 for genotype III compared to
+genotype I.  The condition effect for genotype II (or III) is
+obtained by adding the main condition effect and the interaction
+term for that genotype.  Such a plot can be made using the
+*plotCounts* function as shown above.
+
+```{r interFig2, echo=FALSE, results="hide"}
+mu[4] <- 2^12
+mu[6] <- 2^8
+counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
+d2 <- data.frame(log2c=log2(counts + 1), cond, geno)
+plotit(d2, "Gene 2") + ylim(7,13)
+lm(log2c ~ cond + geno + geno:cond, data=d2)
+``` 
+
+Now we will continue to explain the use of interactions in order to
+test for *differences* in condition effects. We continue with
+the example of condition effects across three genotypes (I, II, and III).
+
+The key point to remember about designs with interaction terms is
+that, unlike for a design `~genotype + condition`, where the condition
+effect represents the 
+*overall* effect controlling for differences due to genotype, by adding
+`genotype:condition`, the main condition effect only
+represents the effect of condition for the *reference level* of
+genotype (I, or whichever level was defined by the user as the
+reference level). The interaction terms `genotypeII.conditionB`
+and `genotypeIII.conditionB` give the *difference*
+between the condition effect for a given genotype and the condition
+effect for the reference genotype. 
+
+This genotype-condition interaction example is examined in further
+detail in Example 3 in the help page for *results*, which
+can be found by typing `?results`. In particular, we show how to
+test for differences in the condition effect across genotype, and we
+show how to obtain the condition effect for non-reference genotypes.
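+
+Briefly, those extractions look like the following sketch, assuming
+the design `~genotype + condition + genotype:condition` and the
+coefficient names discussed above (see `?results` for the complete
+worked example):
+
+```{r interactionSketch, eval=FALSE}
+# the condition effect for the reference genotype I
+results(dds, name="condition_B_vs_A")
+# the condition effect for genotype III: main effect plus interaction
+results(dds, contrast=list(c("condition_B_vs_A","genotypeIII.conditionB")))
+# the difference between the condition effect in genotype III and in I
+results(dds, name="genotypeIII.conditionB")
+```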
+
+Note that for DESeq2 versions higher than 1.10, the *DESeq* function
+will turn off log fold change shrinkage (setting `betaPrior=FALSE`),
+for designs which contain an interaction term. Turning off the log
+fold change shrinkage allows the software to use standard model
+matrices (as would be produced by *model.matrix*), where the
+interaction coefficients are easier to interpret.
+
+## Time-series experiments
+
+There are a number of ways to analyze time-series experiments,
+depending on the biological question of interest. In order to test for
+any differences over multiple time points, one can use a design
+including the time factor, and then test using the likelihood ratio
+test as described in the following section, where the time factor is
+removed in the reduced formula. For a control and treatment time
+series, one can use a design formula containing the condition factor,
+the time factor, and the interaction of the two. In this case, using
+the likelihood ratio test with a reduced model which does not contain
+the interaction terms will test whether the condition induces a change
+in gene expression at any time point after the reference-level time point
+(time 0). An example of the latter analysis is provided in our
+[RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene).
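+
+A sketch of the latter analysis, assuming factors `condition` and
+`time` in the column data, would be:
+
+```{r timeSeriesSketch, eval=FALSE}
+design(dds) <- ~ condition + time + condition:time
+dds <- DESeq(dds, test="LRT", reduced = ~ condition + time)
+res <- results(dds)
+```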
+
+## Likelihood ratio test 
+
+DESeq2 offers two kinds of hypothesis tests: the Wald test, where
+we use the estimated standard error of a log2 fold change to test if it is
+equal to zero, and the likelihood ratio test (LRT). The LRT examines
+two models for the counts, a *full* model with a certain number
+of terms and a *reduced* model, in which some of the terms of the
+*full* model are removed. The test determines if the increased
+likelihood of the data using the extra terms in the *full* model
+is more than expected if those extra terms are truly zero.
+
+The LRT is therefore useful for testing multiple
+terms at once, for example testing 3 or more levels of a factor at once,
+or all interactions between two variables. 
+The LRT for count data is conceptually similar to an analysis of variance (ANOVA)
+calculation in linear regression, except that in the case of the Negative
+Binomial GLM, we use an analysis of deviance (ANODEV), where the
+*deviance* captures the difference in likelihood between a full
+and a reduced model.
+
+The likelihood ratio test can be performed by specifying `test="LRT"`
+when using the *DESeq* function, and
+providing a reduced design formula, e.g. one in which a
+number of terms from `design(dds)` are removed.
+The degrees of freedom for the test are obtained from the difference
+between the number of parameters in the two models. 
+A simple likelihood ratio test, if the full design was
+`~condition` would look like:
+
+```{r simpleLRT, eval=FALSE}
+dds <- DESeq(dds, test="LRT", reduced=~1)
+res <- results(dds)
+``` 
+
+If the full design contained other variables, 
+such as a batch variable, e.g. `~batch + condition`
+then the likelihood ratio test would look like:
+
+```{r simpleLRT2, eval=FALSE}
+dds <- DESeq(dds, test="LRT", reduced=~batch)
+res <- results(dds)
+``` 
+
+<a name="outlier"/>
+
+## Approach to count outliers 
+
+RNA-seq data sometimes contain isolated instances of very large counts
+that are apparently unrelated to the experimental or study design, and
+which may be considered outliers. There are many reasons why outliers
+can arise, including rare technical or experimental artifacts, read
+mapping problems in the case of genetically differing samples, and
+genuine, but rare biological events. In many cases, users appear
+primarily interested in genes that show a consistent behavior, and
+this is the reason why by default, genes that are affected by such
+outliers are set aside by DESeq2, or if there are sufficient samples,
+outlier counts are replaced for model fitting.  These two behaviors
+are described below.
+
+The *DESeq* function calculates, for every gene and for every sample,
+a diagnostic measure for outliers called *Cook's distance*. Cook's distance 
+is a measure of how much a single sample is influencing the fitted 
+coefficients for a gene, and a large value of Cook's distance is 
+intended to indicate an outlier count. 
+The Cook's distances are stored as a matrix available in 
+`assays(dds)[["cooks"]]`.
+
+The *results* function automatically flags genes which contain a 
+Cook's distance above a cutoff for samples which have 3 or more replicates. 
+The *p* values and adjusted *p* values for these genes are set to `NA`. 
+At least 3 replicates are required for flagging, as it is difficult to judge
+which sample might be an outlier with only 2 replicates.
+This filtering can be turned off with `results(dds, cooksCutoff=FALSE)`.
+
+With many degrees of freedom -- i.e., many more samples than the number of parameters to 
+be estimated -- it is undesirable to remove entire genes from the analysis
+just because their data include a single count outlier. When there
+are 7 or more replicates for a given sample, the *DESeq*
+function will automatically replace counts with large Cook's distance 
+with the trimmed mean over all samples, scaled up by the size factor or 
+normalization factor for that sample. This approach is conservative: 
+it will not lead to false positives, as it replaces
+the outlier value with the value predicted by the null hypothesis.
+This outlier replacement only occurs when there are 7 or more
+replicates, and can be turned off with 
+`DESeq(dds, minReplicatesForReplace=Inf)`.
+
+The default Cook's distance cutoff for the two behaviors described above
+depends on the sample size and number of parameters
+to be estimated. The default is to use the 99% quantile of the 
+F(p,m-p) distribution (with *p* the number of parameters including the 
+intercept and *m* number of samples).
+The default for gene flagging can be modified using the `cooksCutoff` 
+argument to the *results* function. 
+For outlier replacement, *DESeq* preserves the original counts in
+`counts(dds)` saving the replacement counts as a matrix named
+`replaceCounts` in `assays(dds)`.
+Note that with continuous variables in the design, outlier detection
+and replacement is not automatically performed, as our 
+current methods involve a robust estimation of within-group variance
+which does not extend easily to continuous covariates. However, users
+can examine the Cook's distances in `assays(dds)[["cooks"]]`, in
+order to perform manual visualization and filtering if necessary.
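+
+For reference, the default cutoff described above can be computed
+manually, as in this sketch:
+
+```{r cooksCutoffSketch, eval=FALSE}
+m <- ncol(dds)  # number of samples
+p <- ncol(model.matrix(design(dds), as.data.frame(colData(dds))))  # parameters
+qf(.99, p, m - p)  # 99% quantile of the F(p, m-p) distribution
+```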
+
+**Note on many outliers:** if there are very many outliers (e.g. many
+hundreds or thousands) reported by `summary(res)`, one might consider
+further exploration to see if a single sample or a few samples should
+be removed due to low quality.  The automatic outlier
+filtering/replacement is most useful in situations in which the number of
+outliers is limited. When there are thousands of reported outliers, it
+might make more sense to turn off the outlier filtering/replacement
+(*DESeq* with `minReplicatesForReplace=Inf` and *results* with
+`cooksCutoff=FALSE`) and perform manual inspection: First it would be
+advantageous to make a PCA plot as described above to spot individual
+sample outliers; Second, one can make a boxplot of the Cook's
+distances to see if one sample is consistently higher than others
+(here this is not the case):
+
+```{r boxplotCooks}
+par(mar=c(8,5,2,2))
+boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
+```
+
+## Dispersion plot and fitting alternatives
+
+Plotting the dispersion estimates is a useful diagnostic. The dispersion
+plot below is typical, with the final estimates shrunk
+from the gene-wise estimates towards the fitted estimates. Some gene-wise
+estimates are flagged as outliers and not shrunk towards the fitted value
+(this outlier detection is described in the manual page for *estimateDispersionsMAP*).
+The amount of shrinkage can be more or less than seen here, depending 
+on the sample size, the number of coefficients, the row mean
+and the variability of the gene-wise estimates.
+
+```{r dispFit}
+plotDispEsts(dds)
+```
+
+### Local or mean dispersion fit
+
+A local smoothed dispersion fit is automatically substituted in the case that
+the parametric curve doesn't fit the observed dispersion-mean relationship.
+This can be prespecified by providing the argument
+`fitType="local"` to either *DESeq* or *estimateDispersions*.
+Additionally, using the mean of gene-wise dispersion estimates as the
+fitted value can be specified by providing the argument `fitType="mean"`. 
+
+### Supply a custom dispersion fit
+
+Any fitted values can be provided during dispersion estimation, using
+the lower-level functions described in the manual page for
+*estimateDispersionsGeneEst*. In the code chunk below, we
+store the gene-wise estimates which were already calculated and saved 
+in the metadata column `dispGeneEst`. Then we calculate the
+median value of the dispersion estimates above a threshold, and save
+these values as the fitted dispersions, using the replacement function
+for *dispersionFunction*. In the last line, the function
+*estimateDispersionsMAP* uses the 
+fitted dispersions to generate maximum *a posteriori* (MAP)
+estimates of dispersion. 
+
+```{r dispFitCustom}
+ddsCustom <- dds
+useForMedian <- mcols(ddsCustom)$dispGeneEst > 1e-7
+medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
+                     na.rm=TRUE)
+dispersionFunction(ddsCustom) <- function(mu) medianDisp
+ddsCustom <- estimateDispersionsMAP(ddsCustom)
+```
+
+<a name="indfilt"/>
+
+## Independent filtering of results
+
+The *results* function of the DESeq2 package performs independent
+filtering by default using the mean of normalized counts as a filter
+statistic.  A threshold on the filter statistic is found which
+optimizes the number of adjusted *p* values lower than a significance
+level `alpha` (we use the standard variable name for significance
+level, though it is unrelated to the dispersion parameter $\alpha$).
+The theory behind independent filtering is discussed in greater detail
+[below](#indfilttheory). The adjusted *p* values for the genes
+which do not pass the filter threshold are set to `NA`.
+
+The default independent filtering is performed using the *filtered_p*
+function of the [genefilter](http://bioconductor.org/packages/genefilter) package, and all of the
+arguments of *filtered_p* can be passed to the *results* function.
+The filter threshold value and the number of rejections at each
+quantile of the filter statistic are available as metadata of the
+object returned by *results*.
+
+For example, we can visualize the optimization by plotting the
+`filterNumRej` attribute of the results object. The *results* function
+maximizes the number of rejections (adjusted *p* value less than a
+significance level), over the quantiles of a filter statistic (the
+mean of normalized counts). The threshold chosen (vertical line) is
+the lowest quantile of the filter for which the number of rejections
+is within 1 residual standard deviation of the peak of a curve fit to
+the number of rejections over the filter quantiles:
+
+```{r filtByMean}
+metadata(res)$alpha
+metadata(res)$filterThreshold
+plot(metadata(res)$filterNumRej, 
+     type="b", ylab="number of rejections",
+     xlab="quantiles of filter")
+lines(metadata(res)$lo.fit, col="red")
+abline(v=metadata(res)$filterTheta)
+```
+
+Independent filtering can be turned off by setting 
+`independentFiltering` to `FALSE`.
+
+```{r noFilt}
+resNoFilt <- results(dds, independentFiltering=FALSE)
+addmargins(table(filtering=(res$padj < .1),
+                 noFiltering=(resNoFilt$padj < .1)))
+``` 
+
+## Tests of log2 fold change above or below a threshold
+
+It is also possible to provide thresholds for constructing
+Wald tests of significance. Two arguments to the *results*
+function allow for threshold-based Wald tests: `lfcThreshold`,
+which takes a non-negative numeric threshold value, 
+and `altHypothesis`, which specifies the kind of test.
+Note that the *alternative hypothesis* is specified by the user, 
+i.e. those genes which the user is interested in finding, and the test 
+provides *p* values for the null hypothesis, the complement of the set 
+defined by the alternative. The `altHypothesis` argument can take one 
+of the following four values, where $\beta$ is the log2 fold change
+specified by the `name` argument, and $x$ is the `lfcThreshold`.
+
+* `greaterAbs` - $|\beta| > x$ - tests are two-tailed
+* `lessAbs` - $|\beta| < x$ - *p* values are the maximum of the upper and lower tests
+* `greater` - $\beta > x$
+* `less` - $\beta < -x$
+
+The test `altHypothesis="lessAbs"` requires that the user have
+run *DESeq* with the argument `betaPrior=FALSE`.  To
+understand the reason for this requirement, consider that during
+hypothesis testing, the null hypothesis is favored unless the data
+provide strong evidence to reject the null.  For this test, including
+a zero-centered prior on log fold change would favor the alternative
+hypothesis, shrinking log fold changes toward zero.  Removing the
+prior on log fold changes for tests of small log fold change allows
+for detection of only those genes where the data alone provides
+evidence against the null.
+
+The four possible values of `altHypothesis` are demonstrated
+in the following code and visually by MA-plots in the following figures.
+First we run *DESeq* and specify `betaPrior=FALSE` in order 
+to demonstrate `altHypothesis="lessAbs"`.
+
+```{r ddsNoPrior}
+ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
+```
+
+In order to produce results tables for the following tests, the same arguments
+(except `ylim`) would be provided to the *results* function. 
+
+```{r lfcThresh}
+par(mfrow=c(2,2),mar=c(2,2,1,1))
+yl <- c(-2.5,2.5)
+resGA <- results(dds, lfcThreshold=.5, altHypothesis="greaterAbs")
+resLA <- results(ddsNoPrior, lfcThreshold=.5, altHypothesis="lessAbs")
+resG <- results(dds, lfcThreshold=.5, altHypothesis="greater")
+resL <- results(dds, lfcThreshold=.5, altHypothesis="less")
+plotMA(resGA, ylim=yl)
+abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
+plotMA(resLA, ylim=yl)
+abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
+plotMA(resG, ylim=yl)
+abline(h=.5,col="dodgerblue",lwd=2)
+plotMA(resL, ylim=yl)
+abline(h=-.5,col="dodgerblue",lwd=2)
+``` 
+
+<a name="access"/>
+
+## Access to all calculated values
+
+All row-wise calculated values (intermediate dispersion calculations,
+coefficients, standard errors, etc.) are stored in the *DESeqDataSet* 
+object, e.g. `dds` in this vignette. These values are accessible 
+by calling *mcols* on `dds`. 
+Descriptions of the columns are accessible by two calls to 
+*mcols*. Note that the call to `substr` below is only for display
+purposes.
+
+```{r mcols}
+mcols(dds,use.names=TRUE)[1:4,1:4]
+substr(names(mcols(dds)),1,10) 
+mcols(mcols(dds), use.names=TRUE)[1:4,]
+```
+
+The mean values $\mu_{ij} = s_j q_{ij}$ and the Cook's distances for each gene and
+sample are stored as matrices in the assays slot:
+
+```{r muAndCooks}
+head(assays(dds)[["mu"]])
+head(assays(dds)[["cooks"]])
+``` 
+
+The dispersions $\alpha_i$ can be accessed with the
+*dispersions* function.
+
+```{r dispersions}
+head(dispersions(dds))
+head(mcols(dds)$dispersion)
+``` 
+
+The size factors $s_j$ are accessible via *sizeFactors*:
+
+```{r sizefactors}
+sizeFactors(dds)
+``` 
+
+For advanced users, we also include a convenience function *coef* for 
+extracting the matrix $[\beta_{ir}]$ for all genes *i* and
+model coefficients $r$.
+This function can also return a matrix of standard errors, see `?coef`.
+The columns of this matrix correspond to the effects returned by *resultsNames*.
+Note that the *results* function is best for building 
+results tables with *p* values and adjusted *p* values.
+
+```{r coef}
+head(coef(dds))
+``` 
+
+The beta prior variance $\sigma_r^2$ is stored as an attribute of the
+*DESeqDataSet*: 
+
+```{r betaPriorVar}
+attr(dds, "betaPriorVar")
+``` 
+
+The dispersion prior variance $\sigma_d^2$ is stored as an
+attribute of the dispersion function:
+
+```{r dispPriorVar}
+dispersionFunction(dds)
+attr(dispersionFunction(dds), "dispPriorVar")
+``` 
+
+The version of DESeq2 which was used to construct the
+*DESeqDataSet* object, or the version used when
+*DESeq* was run, is stored here:
+
+```{r versionNum}
+metadata(dds)[["version"]]
+``` 
+
+## Sample-/gene-dependent normalization factors 
+
+In some experiments, there might be gene-dependent biases
+which vary across samples. For instance, GC-content bias or length
+bias might vary across samples coming from different labs or
+processed at different times. We use the terms *normalization factors*
+for a gene x sample matrix, and *size factors* for a
+single number per sample.  Incorporating normalization factors,
+the mean parameter $\mu_{ij}$ becomes:
+
+$$ \mu_{ij} = NF_{ij} q_{ij} $$
+
+with normalization factor matrix *NF* having the same dimensions
+as the counts matrix *K*. This matrix can be incorporated as shown
+below. We recommend providing a matrix with row-wise geometric means of 1, 
+so that the mean of normalized counts for a gene is close to the mean
+of the unnormalized counts.
+This can be accomplished by dividing out the current row geometric means.
+
+```{r normFactors, eval=FALSE}
+# normFactors is a gene x sample matrix, on the same scale as the counts
+normFactors <- normFactors / exp(rowMeans(log(normFactors)))
+normalizationFactors(dds) <- normFactors
+```
+
+These steps then replace *estimateSizeFactors* which occurs within the
+*DESeq* function. The *DESeq* function will look for pre-existing
+normalization factors and use these in the place of size factors
+(and a message will be printed confirming this).
+
+The methods provided by the
+[cqn](http://bioconductor.org/packages/cqn) or 
+[EDASeq](http://bioconductor.org/packages/EDASeq) packages
+can help correct for GC or length biases. They both describe in their
+vignettes how to create matrices which can be used by DESeq2.
+From the formula above, we see that normalization factors should be on
+the scale of the counts, like size factors, and unlike offsets which
+are typically on the scale of the predictors (i.e. the logarithmic scale for
+the negative binomial GLM). At the time of writing, the transformation
+from the matrices provided by these packages should be:
+
+```{r offsetTransform, eval=FALSE}
+cqnOffset <- cqnObject$glm.offset
+cqnNormFactors <- exp(cqnOffset)
+EDASeqNormFactors <- exp(-1 * EDASeqOffset)
+```
+
+## "Model matrix not full rank"
+
+While most experimental designs run easily using design formula, some
+design formulas can cause problems and result in the *DESeq*
+function returning an error with the text: "the model matrix is not
+full rank, so the model cannot be fit as specified."  There are two
+main reasons for this problem: either one or more columns in the model
+matrix are linear combinations of other columns, or there are levels
+of factors or combinations of levels of multiple factors which are
+missing samples. We address these two problems below and discuss
+possible solutions:
+
+### Linear combinations
+
+The simplest case is the linear combination, or linear dependency
+problem, when two variables contain exactly the same information, such
+as in the following sample table. The software cannot fit an effect
+for `batch` and `condition`, because they produce
+identical columns in the model matrix. This is also referred to as
+*perfect confounding*. A unique solution of coefficients (the $\beta_i$ in
+the formula [below](#theory)) is not possible.
+
+```{r lineardep, echo=FALSE}
+DataFrame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
+``` 
+
+Another situation which will cause problems is when the variables are
+not identical, but one variable can be formed by the combination of
+other factor levels. In the following example, the effect of batch 2
+vs 1 cannot be fit because it is identical to a column in the model
+matrix which represents the condition C vs A effect.
+
+```{r lineardep2, echo=FALSE}
+DataFrame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
+``` 
+
+In both of these cases above, the batch effect cannot be fit and must
+be removed from the model formula. There is just no way to tell apart
+the condition effects and the batch effects. The options are either to assume
+there is no batch effect (which we know is highly unlikely given the
+literature on batch effects in sequencing datasets) or to repeat the
+experiment and properly balance the conditions across batches.
+A balanced design would look like:
+
+```{r lineardep3, echo=FALSE}
+DataFrame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
+``` 
+
+<a name="nested-indiv"/>
+
+### Group-specific condition effects, individuals nested within groups
+
+Finally, there is a case where we *can* in fact perform inference, but
+we may need to re-arrange terms to do so. Consider an experiment with
+grouped individuals, where we seek to test the group-specific effect
+of a condition or treatment, while controlling for individual
+effects. The individuals are nested within the groups: an individual
+can only be in one of the groups, although each individual has one or
+more observations across condition.
+
+An example of such an experiment is below:
+
+```{r groupeffect}
+coldata <- DataFrame(grp=factor(rep(c("X","Y"),each=6)),
+                     ind=factor(rep(1:6,each=2)),
+                     cnd=factor(rep(c("A","B"),6)))
+coldata
+```
+
+Note that individual (`ind`) is a *factor*, not a numeric variable. This is very
+important. 
+
+To make R display all the rows, we can do:
+
+```{r}
+as.data.frame(coldata)
+```
+
+We have two groups of samples X and Y, each with three distinct
+individuals (labeled here 1-6). For each individual, we have
+conditions A and B (for example, this could be control and treated).
+
+This design can be analyzed by DESeq2 but requires a bit of
+refactoring in order to fit the model terms. Here we will use a trick
+described in the [edgeR](http://bioconductor.org/packages/edgeR) user
+guide, from the section 
+*Comparisons Both Between and Within Subjects*.  If we try to
+analyze with a formula such as `~ ind + grp*cnd`, we will
+obtain an error, because the effect for group is a linear combination
+of the individuals.
+
+However, the following steps allow for an analysis of group-specific
+condition effects, while controlling for differences in individual.
+For object construction, you can use a simple design, such as 
+`~ ind + cnd`, as
+long as you remember to replace it before running *DESeq*.
+Then add a column `ind.n` which distinguishes the
+individuals nested within a group. Here, we add this column to
+coldata, but in practice you would add this column to `dds`.
+
+```{r groupeffect2}
+coldata$ind.n <- factor(rep(rep(1:3,each=2),2))
+as.data.frame(coldata)
+``` 
+
+Now we can reassign our *DESeqDataSet* a design of
+`~ grp + grp:ind.n + grp:cnd`, before we call
+*DESeq*. This new design will result in the following model
+matrix: 
+
+```{r groupeffect3}
+model.matrix(~ grp + grp:ind.n + grp:cnd, coldata)
+``` 
+
+Note that, if you have unbalanced numbers of individuals in the two
+groups, you will have zeros for some of the interactions between `grp`
+and `ind.n`. You can remove these columns manually from the model
+matrix and pass the corrected model matrix to the `full` argument of
+the *DESeq* function. See example code in the next section.
+
+Above, the terms `grpX.cndB` and `grpY.cndB` give the
+group-specific condition effects, in other words, the condition B vs A
+effect for group X samples, and likewise for group Y samples. These
+terms control for all of the six individual effects.
+These group-specific condition effects can be extracted using
+*results* with the `name` argument. 
+
+Furthermore, `grpX.cndB` and `grpY.cndB` can be contrasted using the
+`contrast` argument, in order to test if the condition effect is
+different across group: 
+
+```{r groupeffect4, eval=FALSE}
+results(dds, contrast=list("grpY.cndB","grpX.cndB"))
+``` 
+
+### Levels without samples
+
+The base R function for creating model matrices will produce a column
+of zeros if a level is missing from a factor or a combination of
+levels is missing from an interaction of factors. The solution to the
+first case is to call *droplevels* on the column, which will
+remove levels without samples. This was shown in the beginning of this
+vignette.
+
+The second case is also solvable, by manually editing the model
+matrix, and then providing this to *DESeq*. Here we
+construct an example dataset to illustrate:
+
+```{r missingcombo}
+group <- factor(rep(1:3,each=6))
+condition <- factor(rep(rep(c("A","B","C"),each=2),3))
+d <- DataFrame(group, condition)[-c(17,18),]
+as.data.frame(d)
+``` 
+
+Note that if we try to estimate all interaction terms, we introduce a
+column with all zeros, as there are no condition C samples for group
+3. (Here, *unname* is used to display the matrix concisely.)
+
+```{r missingcombo2}
+m1 <- model.matrix(~ condition*group, d)
+colnames(m1)
+unname(m1)
+all.zero <- apply(m1, 2, function(x) all(x==0))
+all.zero
+``` 
+
+We can remove this column like so:
+
+```{r missingcombo3}
+idx <- which(all.zero)
+m1 <- m1[,-idx]
+unname(m1)
+``` 
+
+Now this matrix `m1` can be provided to the `full`
+argument of *DESeq*.  For a likelihood ratio test of
+interactions, a model matrix using a reduced design such as
+`~ condition + group` can be given to the `reduced`
+argument. Wald tests can also be generated instead of the likelihood
+ratio test, but for user-supplied model matrices, the argument
+`betaPrior` must be set to `FALSE`.
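+
+A sketch of these calls, assuming a *DESeqDataSet* `dds` constructed
+with the column data `d` above:
+
+```{r userMatrixSketch, eval=FALSE}
+m2 <- model.matrix(~ condition + group, d)  # reduced model matrix
+ddsLRT <- DESeq(dds, full=m1, reduced=m2, test="LRT", betaPrior=FALSE)
+```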
+
+<a name="theory"/>
+
+# Theory behind DESeq2
+
+## The DESeq2 model 
+
+The DESeq2 model and all the steps taken in the software
+are described in detail in our publication [@Love2014],
+and we include the formula and descriptions in this section as well.
+The differential expression analysis in DESeq2 uses a generalized
+linear model of the form:
+
+$$ K_{ij} \sim \textrm{NB}(\mu_{ij}, \alpha_i) $$
+
+$$ \mu_{ij} = s_j q_{ij} $$
+
+$$ \log_2(q_{ij}) = x_{j.} \beta_i $$
+
+where counts $K_{ij}$ for gene *i*, sample *j* are modeled using
+a negative binomial distribution with fitted mean $\mu_{ij}$
+and a gene-specific dispersion parameter $\alpha_i$.
+The fitted mean is composed of a sample-specific size factor
+$s_j$ and a parameter $q_{ij}$ 
+proportional to the expected true concentration of fragments for sample *j*.
+The coefficients $\beta_i$ give the log2 fold changes for gene *i* for each 
+column of the model matrix $X$. 
+Note that the model can be generalized to use sample- and
+gene-dependent normalization factors $s_{ij}$. 
+
+The dispersion parameter $\alpha_i$ defines the relationship between
+the variance of the observed count and its mean value. In other
+words, it determines how far we expect the observed count to be from the
+mean value, which depends both on the size factor $s_j$ and the
+covariate-dependent part $q_{ij}$ as defined above.
+
+$$ \textrm{Var}(K_{ij}) = E[ (K_{ij} - \mu_{ij})^2 ] = \mu_{ij} + \alpha_i \mu_{ij}^2 $$
+
+An option in DESeq2 is to provide maximum *a posteriori*
+estimates of the log2 fold changes in $\beta_i$ after incorporating a 
+zero-centered Normal prior (`betaPrior`). While previously
+these moderated, or shrunken, estimates were generated by
+the *DESeq* or *nbinomWaldTest* functions, they are now produced by the
+*lfcShrink* function.
+Dispersions are estimated using expected mean values from the maximum
+likelihood estimate of log2 fold changes, and optimizing the Cox-Reid 
+adjusted profile likelihood, as first implemented for RNA-seq data in
+[edgeR](http://bioconductor.org/packages/edgeR) 
+[@CR,edgeR_GLM]. The steps performed by the *DESeq* function are
+documented in its manual page `?DESeq`; briefly, they are:
+
+1) estimation of size factors $s_j$ by *estimateSizeFactors*
+2) estimation of dispersion $\alpha_i$ by *estimateDispersions*
+3) negative binomial GLM fitting for $\beta_i$ and Wald statistics by 
+*nbinomWaldTest*
+
+For access to all the values calculated during these steps, see the
+section [above](#access).
+
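+These sub-functions can also be called individually; the following
+sketch is equivalent to a single call to *DESeq*:
+
+```{r deseqSteps, eval=FALSE}
+dds <- estimateSizeFactors(dds)
+dds <- estimateDispersions(dds)
+dds <- nbinomWaldTest(dds)
+```
+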
+## Changes compared to DESeq
+
+The main changes in the package *DESeq2*, compared to the (older)
+version *DESeq*, are as follows: 
+
+* *RangedSummarizedExperiment* is used as the superclass for storage of input data,
+  intermediate calculations and results.
+* Optional, maximum *a posteriori* estimation of GLM coefficients
+  incorporating a zero-centered Normal prior with variance estimated
+  from data (equivalent to Tikhonov/ridge regularization). This
+  adjustment has little effect on genes with high counts, yet it helps
+  to moderate the otherwise large variance in log2 fold change
+  estimates for genes with low counts or highly variable counts.
+  These estimates are now provided by the *lfcShrink* function.
+* Maximum *a posteriori* estimation of dispersion replaces the
+  `sharingMode` options `fit-only` or `maximum` of the previous version
+  of the package. This is similar to the dispersion estimation methods of DSS [@Wu2012New].
+* All estimation and inference is based on the generalized linear model, which
+  includes the two condition case (previously the *exact test* was used).
+* The Wald test for significance of GLM coefficients is provided as the default
+  inference method, with the likelihood ratio test of the previous version still available.
+* It is possible to provide a matrix of sample-/gene-dependent
+  normalization factors.
+* Automatic independent filtering on the mean of normalized counts.
+* Automatic outlier detection and handling.
+
+<a name="changes"/>
+
+## Methods changes since the 2014 DESeq2 paper
+
+* In version 1.16 (November 2016), the log2 fold change 
+  shrinkage is no longer default for the *DESeq* and *nbinomWaldTest*
+  functions, by setting the defaults of these to `betaPrior=FALSE`,
+  and by introducing a separate function *lfcShrink*, which performs
+  log2 fold change shrinkage for visualization and ranking of genes.
+  While for the majority of bulk RNA-seq experiments the LFC
+  shrinkage did not affect statistical testing, DESeq2 has come to be used
+  as an inference engine by a wider community, and certain sequencing
+  datasets show better performance with the testing separated from the
+  use of the LFC prior. Also, moving LFC shrinkage to a separate
+  function *lfcShrink* allows for easier development of
+  alternative effect size estimators.
+* A small change to the independent filtering routine: instead
+  of taking the quantile of the filter (the mean of normalized counts) which
+  directly *maximizes* the number of rejections, the threshold chosen is 
+  the lowest quantile of the filter for which the
+  number of rejections is close to the peak of a curve fit
+  to the number of rejections over the filter quantiles.
+  "Close to" is defined as within 1 residual standard deviation.
+  This change was introduced in version 1.10 (October 2015).
+* For the calculation of the beta prior variance, instead of
+  matching the empirical quantile to the quantile of a Normal
+  distribution, DESeq2 now uses the weighted quantile function
+  of the [Hmisc](https://cran.r-project.org/package=Hmisc) package. The weighting is described in the
+  manual page for *nbinomWaldTest*.  The weights are the
+  inverse of the expected variance of log counts (as used in the
+  diagonals of the matrix $W$ in the GLM). The effect of the change
+  is that the estimated prior variance is robust against noisy
+  estimates of log fold change from genes with very small
+  counts. This change was introduced in version 1.6 (October 2014).
+
+For a list of all changes since version 1.0.0, see the `NEWS` file
+included in the package.
+
+## Count outlier detection 
+
+DESeq2 relies on the negative binomial distribution to make
+estimates and perform statistical inference on differences.  While the
+negative binomial is versatile in having a mean and dispersion
+parameter, extreme counts in individual samples might not fit well to
+the negative binomial. For this reason, we perform automatic detection
+of count outliers. We use Cook's distance, which is a measure of how
+much the fitted coefficients would change if an individual sample were
+removed [@Cook1977Detection]. For more on the implementation of 
+Cook's distance see the manual page
+for the *results* function. Below we plot the maximum value of
+Cook's distance for each row over the rank of the test statistic 
+to justify its use as a filtering criterion.
+
+```{r cooksPlot}
+W <- res$stat
+maxCooks <- apply(assays(dds)[["cooks"]],1,max)
+idx <- !is.na(W)
+plot(rank(W[idx]), maxCooks[idx], xlab="rank of Wald statistic", 
+     ylab="maximum Cook's distance per gene",
+     ylim=c(0,5), cex=.4, col=rgb(0,0,0,.3))
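+# m: number of samples; p: number of parameters in the model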
+m <- ncol(dds)
+p <- 3
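+# the .99 quantile of F(p, m - p) is the default Cook's cutoff used by results()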
+abline(h=qf(.99, p, m - p))
+``` 
+
+## Contrasts 
+
+Contrasts can be calculated for a *DESeqDataSet* object for which
+the GLM coefficients have already been fit using the Wald test steps
+(*DESeq* with `test="Wald"` or using *nbinomWaldTest*).
+The vector of coefficients $\beta$ is left multiplied by the contrast vector $c$
+to form the numerator of the test statistic. The denominator is formed by multiplying
+the covariance matrix $\Sigma$ for the coefficients on either side by the 
+contrast vector $c$. The square root of this product is an estimate
+of the standard error for the contrast. The contrast statistic is then compared
+to a normal distribution as are the Wald statistics for the DESeq2
+package.
+
+$$ W = \frac{c^t \beta}{\sqrt{c^t \Sigma c}} $$
+
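+As an illustration (not DESeq2's internal code), the statistic could
+be computed in R from a hypothetical coefficient vector `beta`, its
+covariance matrix `Sigma`, and a contrast vector `ctr`:
+
+```{r waldContrast, eval=FALSE}
+W <- as.numeric(t(ctr) %*% beta) / sqrt(as.numeric(t(ctr) %*% Sigma %*% ctr))
+pvalue <- 2 * pnorm(abs(W), lower.tail=FALSE)  # two-sided normal comparison
+```
+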
+## Expanded model matrices 
+
+DESeq2 uses *expanded model matrices* in conjunction with the log2
+fold change prior, in order to produce shrunken log2 fold change estimates and test 
+results which are independent of the choice of reference level. 
+Another way of saying this is that the shrinkage is *symmetric*
+with respect to all the levels of the factors in the design.
+The expanded model matrices differ from the standard model matrices in that
+they have an indicator column (and therefore a coefficient) for
+each level of each factor in the design formula, in addition to an intercept.
+Note that in version 1.10 and onward, standard model matrices are used for
+designs with interaction terms, as the shrinkage of log2 fold changes
+is not recommended for these designs.
+
+The expanded model matrices are not full rank, but a coefficient
+vector $\beta_i$ can still be found due to the zero-centered prior on
+non-intercept coefficients. The prior variance for the log2 fold
+changes is calculated by first generating maximum likelihood estimates
+for a standard model matrix. The prior variance for each level of a
+factor is then set as the average of the mean squared maximum
+likelihood estimates for each level and every possible contrast, such
+that this prior value will be reference-level-independent. The
+`contrast` argument of the *results* function is
+used in order to generate comparisons of interest.
+
+<a name="indfilttheory"/>
+
+## Independent filtering and multiple testing 
+
+### Filtering criteria 
+
+The goal of independent filtering is to filter out those tests from
+the procedure that have little or no chance of showing significant
+evidence, without even looking at their test statistic. Typically,
+this results in increased detection power at the same experiment-wide
+type I error. Here, we measure experiment-wide type I error in terms
+of the false discovery rate.
+
+A good choice for a filtering criterion is one that
+
+1) is statistically independent from the test statistic under the null hypothesis,
+2) is correlated with the test statistic under the alternative, and
+3) does not notably change the dependence structure -- if there is any -- between 
+   the tests that pass the filter, compared to the dependence structure
+   between the tests before filtering.
+
+The benefit from filtering relies on property (2), and we will explore
+it further below. Its statistical validity relies on
+property (1) -- which is simple to formally prove for many combinations
+of filter criteria with test statistics -- and (3), which is harder
+to establish theoretically from first principles, but is rarely a problem in practice.
+We refer to [@Bourgon:2010:PNAS] for further discussion of this topic.
+
+A simple filtering criterion readily available in the results object
+is the mean of normalized counts irrespective of biological condition,
+and so this is the criterion which is used automatically by the
+*results* function to perform independent filtering. Genes with very
+low counts are unlikely to show significant differences, typically
+because of their high dispersion. For example, we can plot the $-\log_{10}$ *p*
+values from all genes over the normalized mean counts:
+
+```{r indFilt}
+plot(res$baseMean+1, -log10(res$pvalue),
+     log="x", xlab="mean of normalized counts",
+     ylab=expression(-log[10](pvalue)),
+     ylim=c(0,30),
+     cex=.4, col=rgb(0,0,0,.3))
+```
+
+### Why does it work?
+
+Consider the *p* value histogram below.
+It shows how the filtering ameliorates the multiple testing problem
+-- and thus the severity of a multiple testing adjustment -- by
+removing a background set of hypotheses whose *p* values are distributed
+more or less uniformly in [0,1].
+
+```{r histindepfilt}
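+# filterThreshold: the cutoff on the mean of normalized counts chosen by independent filtering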
+use <- res$baseMean > metadata(res)$filterThreshold
+h1 <- hist(res$pvalue[!use], breaks=0:50/50, plot=FALSE)
+h2 <- hist(res$pvalue[use], breaks=0:50/50, plot=FALSE)
+colori <- c(`do not pass`="khaki", `pass`="powderblue")
+``` 
+
+Histogram of p values for all tests.  The area shaded in blue
+indicates the subset of those that pass the filtering, the area in
+khaki those that do not pass: 
+
+```{r fighistindepfilt}
+barplot(height = rbind(h1$counts, h2$counts), beside = FALSE,
+        col = colori, space = 0, main = "", ylab="frequency")
+text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)),
+     adj = c(0.5,1.7), xpd=NA)
+legend("topright", fill=rev(colori), legend=rev(names(colori)))
+```
+
+<a name="FAQ"/>
+
+# Frequently asked questions 
+
+## How can I get support for DESeq2?
+
+We welcome questions about our software, and want to
+ensure that we eliminate issues if and when they appear. We have a few
+requests to optimize the process:
+
+* all questions should take place on the Bioconductor support
+  site: <https://support.bioconductor.org>, which serves as a
+  repository of questions and answers. This helps to save the
+  developers' time in responding to similar questions. Make sure to
+  tag your post with `deseq2`. In addition, it is often very helpful
+  to describe the aim of your experiment.
+* before posting, first search the Bioconductor support site
+  mentioned above for past threads which might have answered your
+  question.
+* if you have a question about the behavior of a function, read
+  the sections of the manual page for this function by typing a
+  question mark and the function name, e.g. `?results`.  We
+  spend a lot of time documenting individual functions and the exact
+  steps that the software is performing.
+* include all of your R code, especially the creation of the
+  *DESeqDataSet* and the design formula.  Include complete
+  warning or error messages, and conclude your message with the full
+  output of `sessionInfo()`.
+* if possible, include the output of
+  `as.data.frame(colData(dds))`, so that we can have a sense
+  of the experimental setup. If this contains confidential
+  information, you can replace the levels of those factors using
+  *levels()*.
+
+
+## Why are some *p* values set to NA?
+  
+See the details [above](#pvaluesNA).
+
+## How can I get unfiltered DESeq2 results?
+
+Users can obtain unfiltered GLM results, i.e. without outlier removal
+or independent filtering, with the following call:
+
+```{r vanillaDESeq, eval=FALSE}
+dds <- DESeq(dds, minReplicatesForReplace=Inf)
+res <- results(dds, cooksCutoff=FALSE, independentFiltering=FALSE)
+```
+
+In this case, the only *p* values set to `NA` are those from
+genes with all counts equal to zero.
+
+## How do I use VST or rlog data for differential testing?
+  
+The variance stabilizing and rlog transformations are provided for
+applications other than differential testing, for example clustering
+of samples or other machine learning applications. For differential
+testing we recommend the *DESeq* function applied to raw
+counts as outlined [above](#de).
+  
+## Can I use DESeq2 to analyze paired samples?
+
+Yes, you should use a multi-factor design which includes the sample
+information as a term in the design formula. This will account for 
+differences between the samples while estimating the effect due to 
+the condition. The condition of interest should go at the end of the 
+design formula, e.g. `~ subject + condition`.
+
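+For example, assuming hypothetical `subject` and `condition` columns
+in the column data:
+
+```{r pairedDesign, eval=FALSE}
+design(dds) <- ~ subject + condition  # subject pairs the samples
+dds <- DESeq(dds)
+res <- results(dds)  # by default, the condition effect
+```
+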
+## If I have multiple groups, should I run all together or split into pairs of groups?
+
+Typically, we recommend that users run samples from all groups together, and then
+use the `contrast` argument of the *results* function
+to extract comparisons of interest after fitting the model using *DESeq*.
+
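+For example, with a hypothetical factor `condition` with levels A, B
+and C, specific pairs can be extracted after a single model fit:
+
+```{r allGroupsContrast, eval=FALSE}
+dds <- DESeq(dds)
+resBvsA <- results(dds, contrast=c("condition","B","A"))
+resCvsB <- results(dds, contrast=c("condition","C","B"))
+```
+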
+The model fit by *DESeq* estimates a single dispersion
+parameter for each gene, which defines how far we expect the observed
+count for a sample to be from the mean value predicted by the model,
+given its size factor and its condition group. See the section
+[above](#theory) and the DESeq2 paper for full details.
+Having a single dispersion parameter for each gene is usually
+sufficient for analyzing multi-group data, as the final dispersion value will
+incorporate the within-group variability across all groups. 
+
+However, for some datasets, exploratory data analysis (EDA) plots
+could reveal that one or more groups has much 
+higher within-group variability than the others. A simulated example
+of such a set of samples is shown below.
+This is a case where comparing groups A and B separately --
+by subsetting a *DESeqDataSet* to only the samples from those two
+groups and then running *DESeq* on this subset -- will be
+more sensitive than a model including all samples together.
+It should be noted that such an extreme range of within-group
+variability is not common, although it could arise if certain
+treatments produce an extreme reaction (e.g. cell death).
+Again, this can be easily detected from the EDA plots such as PCA
+described in this vignette.
+
+Here we diagram an extreme range of within-group variability with a
+simulated dataset. Typically, it is recommended to run *DESeq* across
+samples from all groups, for datasets with multiple groups. However,
+this simulated dataset shows a case where it would be preferable to
+compare groups A and B by creating a smaller dataset without the C
+samples. Group C has much higher within-group variability, which would
+inflate the per-gene dispersion estimate for groups A and B as well:
+
+```{r varGroup, echo=FALSE}
+set.seed(3)
+dds1 <- makeExampleDESeqDataSet(n=1000,m=12,betaSD=.3,dispMeanRel=function(x) 0.01)
+dds2 <- makeExampleDESeqDataSet(n=1000,m=12,
+                                betaSD=.3,
+                                interceptMean=mcols(dds1)$trueIntercept,
+                                interceptSD=0,
+                                dispMeanRel=function(x) 0.2)
+dds2 <- dds2[,7:12]
+dds2$condition <- rep("C",6)
+mcols(dds2) <- NULL
+dds <- cbind(dds1, dds2)
+rld <- rlog(dds, blind=FALSE, fitType="mean")
+plotPCA(rld)
+``` 
+
+## Can I run DESeq2 to contrast the levels of many groups?
+
+DESeq2 will work with any kind of design specified using the R
+formula notation. We encourage users to consider exploratory data analysis such
+as principal components analysis rather than performing statistical
+testing of all pairs of many groups of samples. Statistical testing is
+one of many ways of describing differences between samples.
+
+Regarding multiple test correction, if a user is planning to
+contrast all pairs of many levels, and then selectively report the
+results of only a *subset* of those pairs, one needs to correct
+across *contrasts* as well as genes to control for this additional
+form of multiple testing. This can be done by applying the `p.adjust`
+function to a long vector of *p* values from all pairs of
+contrasts, then re-assigning these adjusted *p* values to the
+appropriate results tables.
+
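+A minimal sketch, assuming `resList` is a hypothetical list of
+results tables, one per pairwise contrast:
+
+```{r padjAllPairs, eval=FALSE}
+pvals <- unlist(lapply(resList, function(r) r$pvalue))  # pool p values
+padj <- p.adjust(pvals, method="BH")  # adjust across genes and contrasts
+```
+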
+Regarding the speed of fitting very large models,
+note that each additional level of a factor in the
+design formula adds another parameter to the GLM which is fit by
+DESeq2. Users might consider first removing genes with very few
+reads, e.g. genes with row sum of 1, as this will speed up the
+fitting procedure.
+
+## Can I use DESeq2 to analyze a dataset without replicates?
+
+If a *DESeqDataSet* is provided with an experimental design without replicates,
+a warning is printed that all samples will be treated as replicates
+for the estimation of dispersion. This kind of analysis is
+useful only for exploring the data, but will not provide
+proper statistical inference on differences between groups.
+Without biological replicates, it is not possible to estimate the biological
+variability of each gene.
+More details can be found in the manual page for `?DESeq`.
+
+## How can I include a continuous covariate in the design formula?
+
+Continuous covariates can be included in the design formula in exactly
+the same manner as factorial covariates, and then *results* for the
+continuous covariate can be extracted by specifying `name`.
+Continuous covariates might make sense in certain experiments, where a
+constant fold change might be 
+expected for each unit of the covariate.  However, in many cases, more
+meaningful results can be obtained by cutting continuous covariates
+into a factor defined over a small number of bins (e.g. 3-5).  In this
+way, the average effect of each group is controlled for, regardless of
+the trend over the continuous covariates.  In R, *numeric*
+vectors can be converted into *factors* using the function *cut*.
+
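+For example, a hypothetical continuous covariate `age` could be
+binned like so:
+
+```{r cutCovariate, eval=FALSE}
+dds$ageBin <- cut(dds$age, breaks=4)  # factor with 4 equal-width bins
+```
+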
+## I ran a likelihood ratio test, but results() only gives me one comparison.
+
+"... How do I get the *p* values for all of the variables/levels 
+that were removed in the reduced design?"
+
+This is explained in the help page for `?results` in the
+section about likelihood ratio test p-values, but we will restate the
+answer here. When one performs a likelihood ratio test, the *p* values and
+the test statistic (the `stat` column) are values for the test
+that removes all of the variables which are present in the full
+design and not in the reduced design. This tests the null hypothesis
+that all the coefficients from these variables and levels of these factors
+are equal to zero.
+
+The likelihood ratio test *p* values therefore
+represent a test of *all the variables and all the levels of factors*
+which are among these variables. However, the results table only has space for
+one column of log fold change, so a single variable and a single
+comparison are shown (among the potentially multiple log fold changes
+which were tested in the likelihood ratio test). 
+This is indicated at the top of the results table
+with text such as `log2 fold change (MLE): condition C vs A`, followed
+by `LRT p-value: '~ batch + condition' vs '~ batch'`.
+This indicates that the *p* value is for the likelihood ratio test of
+*all the variables and all the levels*, while the log fold change is a single
+comparison from among those variables and levels.
+See the help page for *results* for more details.
+
+## What are the exact steps performed by DESeq()?
+
+See the manual page for *DESeq*, which links to the 
+subfunctions which are called in order, where complete details are
+listed. You can also read the three steps described in
+[the DESeq2 model](#theory) section of this document.
+
+
+## Is there an official Galaxy tool for DESeq2?
+
+Yes. The repository for the DESeq2 tool is
+
+<https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2> 
+
+and a link to its location in the Tool Shed is 
+
+<https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab>.
+
+## I want to benchmark DESeq2 comparing to other DE tools.
+
+One aspect which can cause problems for comparison is that, by default,
+DESeq2 outputs `NA` values for adjusted *p* values based on 
+independent filtering of genes which have low counts.
+This is a way for DESeq2 to give extra
+information on why the adjusted *p* value for this gene is not small.
+Additionally, *p* values can be set to `NA` based on extreme 
+count outlier detection. These `NA` values should be considered
+*negatives* for purposes of estimating sensitivity and specificity. The
+easiest way to work with the adjusted *p* values in a benchmarking
+context is probably to convert these `NA` values to 1:
+
+```{r convertNA, eval=FALSE}
+res$padj <- ifelse(is.na(res$padj), 1, res$padj)
+``` 
+
+## I have trouble installing DESeq2 on Ubuntu/Linux...
+
+"*I try to install DESeq2 using biocLite(), but I get an error trying to
+install the R packages XML and/or RCurl:*"
+
+`ERROR: configuration failed for package XML`
+
+`ERROR: configuration failed for package RCurl`
+
+You need to install the devel versions of the following packages using
+your standard package manager, e.g. `sudo apt-get install` or 
+`sudo apt install`:
+
+* libxml2-dev
+* libcurl4-openssl-dev
+
+# Acknowledgments
+
+We have benefited in the development of DESeq2 from the help and
+feedback of many individuals, including but not limited to: 
+
+The Bioconductor Core Team,
+Alejandro Reyes, Andrzej Oles, Aleksandra Pekowska, Felix Klein,
+Nikolaos Ignatiadis,
+Vince Carey,
+Owen Solberg,
+Ruping Sun,
+Devon Ryan, 
+Steve Lianoglou, Jessica Larson, Christina Chaivorapol, Pan Du, Richard Bourgon,
+Willem Talloen, 
+Elin Videvall, Hanneke van Deutekom,
+Todd Burwell, 
+Jesse Rowley,
+Igor Dolgalev,
+Stephen Turner,
+Ryan C Thompson,
+Tyr Wiesner-Hanks,
+Konrad Rudolph,
+David Robinson,
+Mingxiang Teng,
+Mathias Lesche,
+Sonali Arora,
+Jordan Ramilowski,
+Ian Dworkin,
+Bjorn Gruning,
+Ryan McMinds,
+Paul Gordon,
+Leonardo Collado Torres,
+Enrico Ferrero.
+
+# Session info
+
+```{r sessionInfo}
+sessionInfo()
+```
+
+# References
+
diff --git a/inst/doc/DESeq2.Rnw b/inst/doc/DESeq2.Rnw
deleted file mode 100644
index 6e5e43b..0000000
--- a/inst/doc/DESeq2.Rnw
+++ /dev/null
@@ -1,2414 +0,0 @@
-%\VignetteIndexEntry{Analyzing RNA-seq data with the "DESeq2" package}
-%\VignettePackage{DESeq2}
-%\VignetteEngine{knitr::knitr}
-
-% To compile this document
-% library('knitr'); rm(list=ls()); knit('DESeq2.Rnw')
-
-\documentclass{article}
-
-<<style, eval=TRUE, echo=FALSE, results="asis">>=
-BiocStyle::latex2()
-@
-
-\usepackage{subfig}% for combining multiple plots in one figure
-
-\newcommand{\deseqtwo}{\textit{DESeq2}}
-\newcommand{\lowtilde}{\raise.17ex\hbox{$\scriptstyle\mathtt{\sim}$}}
-
-<<knitr, echo=FALSE, results="hide">>=
-library("knitr")
-opts_chunk$set(
-  tidy=FALSE,
-  dev="png",
-  fig.show="hide",
-  fig.width=4, fig.height=4.5,
-  fig.pos="tbh",
-  cache=TRUE,
-  message=FALSE)
-@ 
-
-<<loadDESeq2, echo=FALSE>>=
-library("DESeq2")
-@
-
-\author{Michael I.~Love}
-\affil{Department of Biostatistics, Dana-Farber Cancer Institute and Harvard TH Chan School of Public Health, Boston, US;}
-
-\author{Simon Anders}
-\affil{Institute for Molecular Medicine Finland (FIMM), Helsinki, Finland;}
-
-\author{Wolfgang Huber}
-\affil{European Molecular Biology Laboratory (EMBL), Heidelberg, Germany}
-
-\title{Differential analysis of count data -- the DESeq2 package}
-
-\begin{document}
-
-\maketitle
-
-\begin{abstract}
-  A basic task in the analysis of count data from RNA-seq is the detection of
-  differentially expressed genes. The count data are presented as a table which reports,
-  for each sample, the number of sequence fragments that have been assigned to each
-  gene. Analogous data also arise for other assay types, including comparative ChIP-Seq,
-  HiC, shRNA screening, mass spectrometry.  An important analysis question is the
-  quantification and statistical inference of systematic changes between conditions, as
-  compared to within-condition variability. The package \deseqtwo{} provides
-  methods to test for differential expression by use of negative binomial generalized
-  linear models; the estimates of dispersion and logarithmic fold changes 
-  incorporate data-driven prior distributions\footnote{Other \Bioconductor{} packages 
-  with similar aims are \Biocpkg{edgeR}, \Biocpkg{limma},
-  \Biocpkg{DSS}, \Biocpkg{EBSeq} and \Biocpkg{baySeq}.}. 
-  This vignette explains the use of the package and demonstrates typical workflows.  
-  An RNA-seq workflow\footnote{\url{http://www.bioconductor.org/help/workflows/rnaseqGene/}} 
-  on the Bioconductor website covers similar material to this vignette
-  but at a slower pace, including the generation of count matrices
-  from FASTQ files.
-\end{abstract}
-
-\packageVersion{\Sexpr{BiocStyle::pkg_ver("DESeq2")}}
-
- \vspace{5mm}
-  
-  \begin{table}
-    \begin{tabular}{ | l | }
-      \hline 
-      If you use \deseqtwo{} in published research, please cite:  \\
-      \\
-      M. I. Love, W. Huber, S. Anders: \textbf{Moderated estimation of} \\
-      \textbf{fold change and dispersion for RNA-seq data with DESeq2}. \\
-      \emph{Genome Biology} 2014, \textbf{15}:550. \\
-      \url{http://dx.doi.org/10.1186/s13059-014-0550-8}  \\
-      \hline 
-    \end{tabular}
-  \end{table}
-
-
-<<options, results="hide", echo=FALSE>>=
-options(digits=3, prompt=" ", continue=" ")
-@
-
-\newpage
-
-\tableofcontents
-
-\newpage
-
-\section{Standard workflow}
-
-\subsection{Quick start}
-
-Here we show the most basic steps for a differential expression analysis.
-These steps require that you have a \Rclass{RangedSummarizedExperiment} object
-\Robject{se} which contains the counts and information about samples.
-The \Robject{design} indicates that we want to
-measure the effect of condition, controlling for batch differences.
-The two factor variables \Robject{batch} and \Robject{condition} 
-should be columns of \Robject{colData(se)}.
-
-<<quick, eval=FALSE>>=
-dds <- DESeqDataSet(se, design = ~ batch + condition)
-dds <- DESeq(dds)
-res <- results(dds, contrast=c("condition","trt","con"))
-@
-
-If you have a count matrix and sample information table, the first
-line would use \Rfunction{DESeqDataSetFromMatrix} instead of 
-\Rfunction{DESeqDataSet}, as shown in Section~\ref{sec:countmat}.
-
-\subsection{How to get help}
-
-All \deseqtwo{} questions should be posted to the Bioconductor support
-site: \url{https://support.bioconductor.org}, which serves as a
-repository of questions and answers. See the first question in the
-list of Frequently Asked Questions (Section \ref{sec:faq})
-for more information about how to construct an informative post.
-
-\subsection{Input data} \label{sec:prep}
-
-\subsubsection{Why un-normalized counts?}
-
-As input, the \deseqtwo{} package expects count data as obtained, e.\,g.,
-from RNA-seq or another high-throughput sequencing experiment, in the form of a
-matrix of integer values. The value in the $i$-th row and the $j$-th column of
-the matrix tells how many reads can be assigned to gene $i$ in sample $j$.
-Analogously, for other types of assays, the rows of the matrix might correspond
-e.\,g.\ to binding regions (with ChIP-Seq) or peptide sequences (with
-quantitative mass spectrometry). We will list methods for obtaining count matrices
-in sections below.
-
-The values in the matrix should be un-normalized counts of sequencing reads (for
-single-end RNA-seq) or fragments (for paired-end RNA-seq). 
-The \href{http://www.bioconductor.org/help/workflows/rnaseqGene/}{RNA-seq workflow}
-describes multiple techniques for preparing such count matrices.
-It is important to provide count matrices as input in order for \deseqtwo{}'s
-statistical model \cite{Love2014} to hold, as only the
-count values allow assessing the measurement precision correctly. The \deseqtwo{}
-model internally corrects for library size, so transformed or
-normalized values such as counts scaled by library size should not
-be used as input. 
-
-\subsubsection{\Rclass{SummarizedExperiment} input} \label{sec:sumExpInput}
-
-The class used by the \deseqtwo{} package to store the read counts 
-is \Rclass{DESeqDataSet} which extends the \Rclass{RangedSummarizedExperiment} 
-class of the \Biocpkg{SummarizedExperiment} package. 
-This facilitates preparation steps and also downstream exploration of results. 
-For counting aligned reads in genes, the \Rfunction{summarizeOverlaps} function of
-\Biocpkg{GenomicAlignments} with \Robject{mode="Union"} is
-encouraged, resulting in a \Rclass{RangedSummarizedExperiment} object.
-Other methods for obtaining count matrices are described in the next section.
-
-An example of the steps to produce a \Rclass{RangedSummarizedExperiment} can
-be found in an RNA-seq workflow on the Bioconductor 
-website: \url{http://www.bioconductor.org/help/workflows/rnaseqGene/}
-and in the vignette for the data package \Biocexptpkg{airway}.
-Here we load the \Rclass{RangedSummarizedExperiment} from that package in
-order to build a \Rclass{DESeqDataSet}.
-
-<<loadSumExp>>=
-library("airway")
-data("airway")
-se <- airway
-@
-
-A \Rclass{DESeqDataSet} object must have an associated design formula.  
-The design formula expresses the variables which will be
-used in modeling. The formula should be a tilde ($\sim$) followed by the
-variables with plus signs between them (it will be coerced into a
-\Rclass{formula} if it is not already).  An intercept is included,
-representing the base mean of counts. The design can be changed later, 
-however then all differential analysis steps should be repeated, 
-as the design formula is used to estimate the dispersions and 
-to estimate the log2 fold changes of the model. 
-The constructor function below shows the generation of a
-\Rclass{DESeqDataSet} from a \Rclass{RangedSummarizedExperiment} \Robject{se}. 
-
-\emph{Note}: In order to benefit from the default settings of the
-package, you should put the variable of interest at the end of the
-formula and make sure the control level is the first level.
-
-<<sumExpInput>>=
-library("DESeq2")
-ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
-ddsSE
-@
-
-\subsubsection{Count matrix input} \label{sec:countmat}
-
-Alternatively, the function \Rfunction{DESeqDataSetFromMatrix} can be
-used if you already have a matrix of read counts prepared from another
-source. Another method for quickly producing count matrices 
-from alignment files is the \Rfunction{featureCounts} function
-in the \Biocpkg{Rsubread} package.
-To use \Rfunction{DESeqDataSetFromMatrix}, the user should provide 
-the counts matrix, the information about the samples (the columns of the 
-count matrix) as a \Rclass{DataFrame} or \Rclass{data.frame}, 
-and the design formula.
-
-To demonstrate the use of \Rfunction{DESeqDataSetFromMatrix}, 
-we will read in count data from the \Biocexptpkg{pasilla} package.
-We read in a count matrix, which we will name \Robject{countData}, 
-and the sample information table, which we will name \Robject{colData}. 
-Further below we describe how to extract 
-these objects from, e.g. \Rfunction{featureCounts} output.
-
-<<loadPasilla>>=
-library("pasilla")
-pasCts <- system.file("extdata", "pasilla_gene_counts.tsv",
-                 package="pasilla", mustWork=TRUE)
-pasAnno <- system.file("extdata", "pasilla_sample_annotation.csv",
-                       package="pasilla", mustWork=TRUE)
-countData <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
-colData <- read.csv(pasAnno, row.names=1)
-colData <- colData[,c("condition","type")]
-@ 
-
-We examine the count matrix and column data to see if they are consistent:
-
-<<showPasilla>>=
-head(countData)
-head(colData)
-@ 
-
-Note that these are not in the same order with respect to samples! 
-It is critical that the columns of the count matrix and the rows of
-the column data (information about samples) are in the same order.
-We should re-arrange one or the other so that they are consistent in
-terms of sample order (if we do not, later functions would produce
-an error). We additionally need to chop off the \Robject{"fb"} of the 
-row names of \Robject{colData}, so the naming is consistent.
-
-<<reorderPasila>>=
-rownames(colData) <- sub("fb","",rownames(colData))
-all(rownames(colData) %in% colnames(countData))
-countData <- countData[, rownames(colData)]
-all(rownames(colData) == colnames(countData))
-@ 
-
-If you have used the \Rfunction{featureCounts} function in the 
-\Biocpkg{Rsubread} package, the matrix of read counts can be directly 
-provided from the \Robject{"counts"} element in the list output.
-The count matrix and column data can typically be read into R 
-from flat files using base R functions such as \Rfunction{read.csv} 
-or \Rfunction{read.delim}.
-For \textit{HTSeq} count files, see the dedicated input function below.
-
-With the count matrix, \Robject{countData}, and the sample
-information, \Robject{colData}, we can construct a \Rclass{DESeqDataSet}:
-
-<<matrixInput>>=
-dds <- DESeqDataSetFromMatrix(countData = countData,
-                              colData = colData,
-                              design = ~ condition)
-dds
-@
-
-If you have additional feature data, it can be added to the
-\Rclass{DESeqDataSet} by adding to the metadata columns of a newly
-constructed object. (Here we add redundant data just for demonstration, as
-the gene names are already the rownames of the \Robject{dds}.)
-
-<<addFeatureData>>=
-featureData <- data.frame(gene=rownames(countData))
-(mcols(dds) <- DataFrame(mcols(dds), featureData))
-@ 
-
-\subsubsection{tximport: transcript abundance summarized to gene-level}
-
-Users can create gene-level count matrices for use with \deseqtwo{}
-by importing information using the \Biocpkg{tximport} package.
-This workflow allows users to import transcript abundance estimates
-from a variety of external software, including the following methods:
-
-\begin{itemize}
-\item \href{http://www.cs.cmu.edu/~ckingsf/software/sailfish/}{Sailfish} 
-  \cite{Patro2014Sailfish}
-\item \href{http://combine-lab.github.io/salmon/}{Salmon} 
-  \cite{Patro2015Salmon}
-\item
-  \href{https://pachterlab.github.io/kallisto/about.html}{kallisto} 
-  \cite{Bray2015Near}
-\item \href{http://deweylab.github.io/RSEM/}{RSEM} 
-  \cite{Li2011RSEM}
-\end{itemize}
-
-Some advantages of using the above methods for transcript abundance
-estimation are: (i) this approach corrects for potential changes
-in gene length across samples 
-(e.g. from differential isoform usage) \cite{Trapnell2013Differential},
-(ii) some of these methods (\textit{Sailfish, Salmon, kallisto}) 
-are substantially faster and require less memory
-and disk usage compared to alignment-based methods that require
-creation and storage of BAM files, and
-(iii) it is possible to avoid discarding those fragments that can
-align to multiple genes with homologous sequence, thus increasing
-sensitivity \cite{Robert2015Errors}.
-
-Full details on the motivation and methods for importing transcript
-level abundance and count estimates, summarizing to gene-level count matrices 
-and producing an offset which corrects for potential changes in average
-transcript length across samples are described in \cite{Soneson2015}.
-The \textit{tximport}$\rightarrow$\deseqtwo{} approach uses rounded estimated
-gene counts (but not normalized) instead of the raw count of fragments
-which can be unambiguously assigned to a gene.
-
-Here, we demonstrate how to import transcript abundances
-and construct a gene-level \Rclass{DESeqDataSet} object
-from \textit{Salmon} \texttt{quant.sf} files, which are
-stored in the \Biocexptpkg{tximportData} package.
-Note that, instead of locating \Robject{dir} using \Rfunction{system.file},
-a user would typically just provide a path, e.g. \texttt{/path/to/quant/files}.
-For further details on use of \Rfunction{tximport}, 
-including the construction of the \Robject{tx2gene} table for linking
-transcripts to genes, please refer to the \Biocpkg{tximport} package vignette. 
-
-<<tximport>>=
-library("tximport")
-library("readr")
-library("tximportData")
-dir <- system.file("extdata", package="tximportData")
-samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
-files <- file.path(dir,"salmon", samples$run, "quant.sf")
-names(files) <- paste0("sample",1:6)
-tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
-txi <- tximport(files, type="salmon", tx2gene=tx2gene, reader=read_tsv)
-@ 
-
-Next we create a condition vector to demonstrate building a
-\Robject{DESeqDataSet}. For a typical use, this information would already
-be present as a column of the \Robject{samples} table.
-The best practice is to read \Robject{colData} from a CSV or TSV file, 
-and to construct \Robject{files} 
-from a column of \Robject{colData}, as shown in the code chunk above.
-
-<<txi2dds>>=
-coldata <- data.frame(condition=factor(rep(c("A","B"),each=3)))
-rownames(coldata) <- colnames(txi$counts)
-ddsTxi <- DESeqDataSetFromTximport(txi, colData=coldata,
-                                   design=~ condition)
-@
-
-The \Robject{ddsTxi} object can then be used as \Robject{dds} in the
-following analysis steps.
-
-\subsubsection{\textit{HTSeq} input}
-
-You can use the function \Rfunction{DESeqDataSetFromHTSeqCount} if you
-have used \texttt{htseq-count} from the \textit{HTSeq} python  
-package\footnote{available from \url{http://www-huber.embl.de/users/anders/HTSeq}, described in \cite{Anders:2014:htseq}}.  
-For an example of using the python scripts, see the
-\Biocexptpkg{pasilla} data package. First you will want to specify a
-variable which points to the directory in which the \textit{HTSeq}
-output files are located. 
-
-<<htseqDirI, eval=FALSE>>=
-directory <- "/path/to/your/files/"
-@ 
-
-However, for demonstration purposes only, the following line of
-code points to the directory for the demo \textit{HTSeq} output
-files packaged with the \Biocexptpkg{pasilla} package.
-
-<<htseqDirII>>=
-directory <- system.file("extdata", package="pasilla", mustWork=TRUE)
-@ 
-
-We specify which files to read in using \Rfunction{list.files},
-and select those files which contain the string \Robject{"treated"} 
-using \Rfunction{grep}. The \Rfunction{sub} function is used to 
-chop up the sample filename to obtain the condition status, or 
-you might alternatively read in a phenotypic table 
-using \Rfunction{read.table}.
-
-<<htseqInput>>=
-sampleFiles <- grep("treated",list.files(directory),value=TRUE)
-sampleCondition <- sub("(.*treated).*","\\1",sampleFiles)
-sampleTable <- data.frame(sampleName = sampleFiles,
-                          fileName = sampleFiles,
-                          condition = sampleCondition)
-ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
-                                       directory = directory,
-                                       design= ~ condition)
-ddsHTSeq
-@
-
-\subsubsection{Pre-filtering}
-
-While it is not necessary to pre-filter low count genes before running the \deseqtwo{}
-functions, there are two reasons which make pre-filtering useful:
-by removing rows in which there are no reads or nearly no reads,
-we reduce the memory size of the \Robject{dds} data object and 
-we increase the speed of the transformation
-and testing functions within \deseqtwo{}. Here we perform a minimal
-pre-filtering to remove rows that have only 0 or 1 read. Note that more strict
-filtering to increase power is \textit{automatically} applied via independent filtering
-on the mean of normalized counts within the \Rfunction{results}
-function, which will be discussed in Section~\ref{sec:autoFilt}.
-
-<<prefilter>>=
-dds <- dds[ rowSums(counts(dds)) > 1, ]
-@ 
-
-\subsubsection{Note on factor levels} \label{sec:factorLevels}
-
-By default, R will choose a \textit{reference level} for factors based
-on alphabetical order. Then, if you never tell the \deseqtwo{} functions
-which level you want to compare against (e.g. which level represents
-the control group), the comparisons will be based on the alphabetical
-order of the levels. There are two solutions: you can either
-explicitly tell \Rfunction{results} which comparison to make using the
-\Robject{contrast} argument (this will be shown later), or you can
-explicitly set the factor levels. Setting the factor levels can be done in two ways,
-either using factor:
-
-<<factorlvl>>=
-dds$condition <- factor(dds$condition, levels=c("untreated","treated"))
-@ 
-
-...or using \Rfunction{relevel}, just specifying the reference level:
-
-<<relevel>>=
-dds$condition <- relevel(dds$condition, ref="untreated")
-@ 
-
-If you need to subset the columns of a \Rclass{DESeqDataSet},
-i.e., when removing certain samples from the analysis, it is possible
-that all the samples for one or more levels of a variable in the design
-formula would be removed. In this case, the \Rfunction{droplevels} function can be used
-to remove those levels which do not have samples in the current \Rclass{DESeqDataSet}:
-
-<<droplevels>>=
-dds$condition <- droplevels(dds$condition)
-@ 
-
-\subsubsection{Collapsing technical replicates}
-
-\deseqtwo{} provides a function \Rfunction{collapseReplicates} which can
-assist in combining the counts from technical replicates into single
-columns of the count matrix. The term ``technical replicate'' 
-implies multiple sequencing runs of the same library. 
-You should not collapse biological replicates using this function.
-See the manual page for an example of the use of
-\Rfunction{collapseReplicates}. 
-
-\subsubsection{About the pasilla dataset}
-
-We continue with the \Biocexptpkg{pasilla} data constructed from the
-count matrix method above. This data set is from an experiment on
-\emph{Drosophila melanogaster} cell cultures that investigated the
-effect of RNAi knock-down of the splicing factor \emph{pasilla}
-\cite{Brooks2010}.  The detailed transcript of the production of
-the \Biocexptpkg{pasilla} data is provided in the vignette of the 
-data package \Biocexptpkg{pasilla}.
-
-\subsection{Differential expression analysis} \label{sec:de}
-
-The standard differential expression analysis steps are wrapped
-into a single function, \Rfunction{DESeq}. The estimation steps performed
-by this function are described in Section~\ref{sec:glm}, in the manual page for
-\Robject{?DESeq} and in the Methods section of the \deseqtwo{} publication \cite{Love2014}. 
-The individual sub-functions which are called by \Rfunction{DESeq}
-are still available, described in Section~\ref{sec:steps}. 
-
-Results tables are generated using the function \Rfunction{results}, which
-extracts a results table with log2 fold changes, $p$ values and adjusted
-$p$ values. With no arguments to \Rfunction{results}, the results will be for
-the last variable in the design formula, and if this is a factor, 
-the comparison will be the last level of this variable over the first level. 
-Details about the comparison are printed to the console. The text, \texttt{condition}
-\texttt{treated vs untreated}, tells you that the estimates are of the logarithmic
-fold change $\log_2 ( \textrm{treated} / \textrm{untreated} )$.
-
-<<deseq>>=
-dds <- DESeq(dds)
-res <- results(dds)
-res
-@ 
-
-These steps should take less than 30 seconds for most analyses. For
-experiments with many samples (e.g. 100 samples), one can take
-advantage of parallelized computation.  Both of the above functions
-have an argument \Robject{parallel} which if set to \Robject{TRUE} can
-be used to distribute computation across cores specified by the
-\Rfunction{register} function of \Biocpkg{BiocParallel}. For example,
-the following chunk (not evaluated here), would register 4 cores, and
-then the two functions above, with \Robject{parallel=TRUE}, would
-split computation over these cores. 
-
-<<parallel, eval=FALSE>>=
-library("BiocParallel")
-register(MulticoreParam(4))
-@
-
-We can order our results table by the smallest adjusted $p$ value:
-
-<<resOrder>>=
-resOrdered <- res[order(res$padj),]
-@
-
-We can summarize some basic tallies using the
-\Rfunction{summary} function.
-
-<<sumRes>>=
-summary(res)
-@ 
-
-How many adjusted p-values were less than 0.1?
-
-<<sumRes01>>=
-sum(res$padj < 0.1, na.rm=TRUE)
-@ 
-
-The \Rfunction{results} function contains a number of arguments to
-customize the results table which is generated.  Note that the
-\Rfunction{results} function automatically performs independent
-filtering based on the mean of normalized counts for each gene,
-optimizing the number of genes which will have an adjusted $p$ value
-below a given FDR cutoff, \Robject{alpha}.
-Independent filtering is further discussed in Section~\ref{sec:autoFilt}.
-By default the argument
-\Robject{alpha} is set to $0.1$.  If the adjusted $p$ value cutoff
-will be a value other than $0.1$, \Robject{alpha} should be set to
-that value:
-
-<<resAlpha05>>=
-res05 <- results(dds, alpha=0.05)
-summary(res05)
-sum(res05$padj < 0.05, na.rm=TRUE)
-@ 
-
-A generalization of the idea of $p$ value filtering is to \textit{weight} hypotheses
-to optimize power. A new Bioconductor package, \Biocpkg{IHW}, is now available
-that implements the method of \textit{Independent Hypothesis Weighting} \cite{Ignatiadis2015}.
-Here we show the use of \textit{IHW} for $p$ value adjustment of \deseqtwo{} results.
-For more details, please see the vignette of the \Biocpkg{IHW} package.
-Note that the \textit{IHW} result object is stored in the metadata.
-
-<<IHW>>=
-library("IHW")
-resIHW <- results(dds, filterFun=ihw)
-summary(resIHW)
-sum(resIHW$padj < 0.1, na.rm=TRUE)
-metadata(resIHW)$ihwResult
-@ 
-
-If a multi-factor design is used, or if the variable in the design
-formula has more than two levels, the \Robject{contrast} argument of
-\Rfunction{results} can be used to extract different comparisons from
-the \Rclass{DESeqDataSet} returned by \Rfunction{DESeq}.
-Multi-factor designs are discussed further in Section~\ref{sec:multifactor},
-and the use of the \Robject{contrast} argument is discussed in Section~\ref{sec:contrasts}.
-
-For advanced users, note that all the values calculated by the \deseqtwo{} 
-package are stored in the \Rclass{DESeqDataSet} object, and access 
-to these values is discussed in Section~\ref{sec:access}.
-
-\subsection{Exploring and exporting results}
-
-\subsubsection{MA-plot}
-
-\begin{figure}[tb]
-\includegraphics[width=.49\textwidth]{figure/MANoPrior-1}
-\includegraphics[width=.49\textwidth]{figure/MA-1}
-\caption{
-  MA-plot.
-  These plots show the log2 fold changes from the treatment over
-  the mean of normalized counts, i.e. the average of counts normalized by
-  size factors. The left plot shows the ``unshrunken'' log2 fold changes, 
-  while the right plot, produced by the code above, shows the shrinkage 
-  of log2 fold changes resulting from the incorporation of zero-centered
-  normal prior. The shrinkage is greater for the log2 fold change
-  estimates from genes with low counts and high dispersion, 
-  as can be seen by the narrowing of spread of leftmost points 
-  in the right plot.}
-\label{fig:MA}
-\end{figure}
-
-In \deseqtwo{}, the function \Rfunction{plotMA} shows the log2
-fold changes attributable to a given variable over the mean of normalized counts.
-Points will be colored red if the adjusted $p$ value is less than 0.1.  
-Points which fall out of the window are plotted as open triangles pointing 
-either up or down.
-
-<<MA, fig.width=4.5, fig.height=4.5>>=
-plotMA(res, main="DESeq2", ylim=c(-2,2))
-@
-
-After calling \Rfunction{plotMA}, one can use the function
-\Rfunction{identify} to interactively detect the row number of
-individual genes by clicking on the plot. One can then recover
-the gene identifiers by saving the resulting indices:
-
-<<MAidentify, eval=FALSE>>=
-idx <- identify(res$baseMean, res$log2FoldChange)
-rownames(res)[idx]
-@ 
-
-The MA-plot of log2 fold changes returned by \deseqtwo{} allows us to
-see how the shrinkage of fold changes works for genes with low
-counts. You can still obtain results tables which include the
-``unshrunken'' log2 fold changes (for a simple comparison, the ratio
-of the mean normalized counts in the two groups). A column
-\Robject{lfcMLE} with the unshrunken maximum likelihood estimate (MLE)
-for the log2 fold change will be added with an additional argument to
-\Rfunction{results}:
-
-<<resMLE>>=
-resMLE <- results(dds, addMLE=TRUE)
-head(resMLE, 4)
-@ 
-
-One can make an MA-plot of the unshrunken estimates like so:
-
-<<MANoPrior, fig.width=4.5, fig.height=4.5>>=
-plotMA(resMLE, MLE=TRUE, main="unshrunken LFC", ylim=c(-2,2))
-@
-
-\subsubsection{Plot counts} \label{sec:plotcounts}
-
-It can also be useful to examine the counts of reads for a single gene
-across the groups. A simple function for making this
-plot is \Rfunction{plotCounts}, which normalizes counts by sequencing depth
-and adds a pseudocount of $\frac{1}{2}$ to allow for log scale plotting.
-The counts are grouped by the variables in \Robject{intgroup}, where
-more than one variable can be specified. Here we specify the gene
-which had the smallest $p$ value from the results table created
-above. You can select the gene to plot by rowname or by numeric index.
-
-<<plotCounts, dev="pdf", fig.width=4.5, fig.height=5>>=
-plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
-@ 
-
-For customized plotting, an argument \Robject{returnData} specifies
-that the function should only return a \Rclass{data.frame} for
-plotting with \Rfunction{ggplot}.
-
-<<plotCountsAdv, dev="pdf", fig.width=3.5, fig.height=3.5>>=
-d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition", 
-                returnData=TRUE)
-library("ggplot2")
-ggplot(d, aes(x=condition, y=count)) + 
-  geom_point(position=position_jitter(w=0.1,h=0)) + 
-  scale_y_log10(breaks=c(25,100,400))
-@ 
-
-\begin{figure}
-\includegraphics[width=.49\textwidth]{figure/plotCounts-1}
-\includegraphics[width=.49\textwidth]{figure/plotCountsAdv-1}
-\caption{
-  Plot of counts for one gene.
-  The plot of normalized counts (plus a pseudocount of $\frac{1}{2}$)
-  either made using the \Rfunction{plotCounts} function (left)
-  or using another plotting library (right, using \CRANpkg{ggplot2}).}
-\label{fig:plotcounts}
-\end{figure}
-
-\subsubsection{More information on results columns} \label{sec:moreInfo}
-
-Information about which variables and tests were used can be found by calling
-the function \Rfunction{mcols} on the results object.
-
-<<metadata>>=
-mcols(res)$description
-@
-
-For a particular gene, a log2 fold change of $-1$ for
-\Robject{condition treated vs untreated} means that the treatment
-induces a multiplicative change in observed gene expression level of
-$2^{-1} = 0.5$ compared to the untreated condition. If the variable of
-interest is continuous-valued, then the reported log2 fold change is
-per unit of change of that variable.
-
-\textbf{Note on p-values set to NA}: some values in the results table
-can be set to \Robject{NA} for one of the following reasons:
-
-\begin{enumerate} 
-  \item If within a row, all samples have zero counts, 
-    the \Robject{baseMean} column will be zero, and the
-    log2 fold change estimates, $p$ value and adjusted $p$ value
-    will all be set to \texttt{NA}.
-  \item If a row contains a sample with an extreme count outlier
-    then the $p$ value and adjusted $p$ value will be set to \texttt{NA}.
-    These outlier counts are detected by Cook's distance. Customization
-    of this outlier filtering and description of functionality for 
-    replacement of outlier counts and refitting is described in 
-    Section~\ref{sec:outlierApproach}.
-  \item If a row is filtered by automatic independent filtering, 
-    for having a low mean normalized count, then only the adjusted $p$
-    value will be set to \texttt{NA}. 
-    Description and customization of independent filtering is 
-    described in Section~\ref{sec:autoFilt}.
-\end{enumerate}
-
-\subsubsection{Rich visualization and reporting of results}
-
-\textbf{ReportingTools.} An HTML report of the results with plots and sortable/filterable columns
-can be generated using the \Biocpkg{ReportingTools} package
-on a \Rclass{DESeqDataSet} that has been processed by the \Rfunction{DESeq} function.
-For a code example, see the ``RNA-seq differential expression'' vignette at
-the \Biocpkg{ReportingTools} page, or the manual page for the 
-\Rfunction{publish} method for the \Rclass{DESeqDataSet} class.
-
-\textbf{regionReport.} An HTML and PDF summary of the results with plots
-can also be generated using the \Biocpkg{regionReport} package.
-The \Rfunction{DESeq2Report} function should be run on a 
-\Rclass{DESeqDataSet} that has been processed by the \Rfunction{DESeq} function.
-For more details see the manual page for \Rfunction{DESeq2Report} 
-and an example vignette in the \Biocpkg{regionReport} package.
-
-\textbf{Glimma.} Interactive visualization of \deseqtwo{} output, 
-including MA-plots (also called MD-plot) can be generated using the
-\Biocpkg{Glimma} package. See the manual page for \Rfunction{glMDPlot.DESeqResults}.
-
-\textbf{pcaExplorer.} Interactive visualization of \deseqtwo{} output,
-including PCA plots, boxplots of counts and other useful summaries can be
-generated using the \Biocpkg{pcaExplorer} package.
-See the ``Launching the application'' section of the package vignette.
-
-\subsubsection{Exporting results to CSV files}
-
-A plain-text file of the results can be exported using the 
-base \R{} functions \Rfunction{write.csv} or \Rfunction{write.table}. 
-We suggest using a descriptive file name indicating the variable
-and levels which were tested.
-
-<<export, eval=FALSE>>=
-write.csv(as.data.frame(resOrdered), 
-          file="condition_treated_results.csv")
-@
-
-Exporting only the results which pass an adjusted $p$ value
-threshold can be accomplished with the \Rfunction{subset} function,
-followed by the \Rfunction{write.csv} function.
-
-<<subset>>=
-resSig <- subset(resOrdered, padj < 0.1)
-resSig
-@ 
-
-\subsection{Multi-factor designs} \label{sec:multifactor}
-
-Experiments with more than one factor influencing the counts can be
-analyzed using design formulas that include the additional variables.  
-By adding these to the design, one can control for additional variation
-in the counts. For example, if the condition samples are balanced
-across experimental batches, by including the \Robject{batch} factor in the
-design, one can increase the sensitivity for finding differences due
-to \Robject{condition}. There are multiple ways to analyze experiments when the
-additional variables are of interest and not just controlling factors 
-(see Section \ref{sec:interactions} on interactions).
-
-The data in the \Biocexptpkg{pasilla} package have a condition of interest 
-(the column \Robject{condition}), as well as information on the type of sequencing 
-which was performed (the column \Robject{type}), as we can see below:
-
-<<multifactor>>=
-colData(dds)
-@
-
-We create a copy of the \Rclass{DESeqDataSet}, so that we can rerun
-the analysis using a multi-factor design.
-
-<<copyMultifactor>>=
-ddsMF <- dds
-@
-
-We can account for the different types of sequencing, and get a clearer picture
-of the differences attributable to the treatment.  As \Robject{condition} is the
-variable of interest, we put it at the end of the formula. Thus the \Rfunction{results}
-function will by default pull the \Robject{condition} results unless 
-\Robject{contrast} or \Robject{name} arguments are specified. 
-Then we can re-run \Rfunction{DESeq}:
-
-<<replaceDesign>>=
-design(ddsMF) <- formula(~ type + condition)
-ddsMF <- DESeq(ddsMF)
-@
-
-Again, we access the results using the \Rfunction{results} function.
-
-<<multiResults>>=
-resMF <- results(ddsMF)
-head(resMF)
-@
-
-It is also possible to retrieve the log2 fold changes, $p$ values and adjusted
-$p$ values of the \Robject{type} variable. The \Robject{contrast} argument of 
-the function \Rfunction{results} takes a character vector of length three:
-the name of the variable, the name of the factor level for the numerator
-of the log2 ratio, and the name of the factor level for the denominator.
-The \Robject{contrast} argument can also take other forms, as
-described in the help page for \Rfunction{results} and in Section~\ref{sec:contrasts}.
-
-<<multiTypeResults>>=
-resMFType <- results(ddsMF,
-                     contrast=c("type", "single-read", "paired-end"))
-head(resMFType)
-@
-
-If the variable is continuous or an interaction term (see Section~\ref{sec:interactions})
-then the results can be extracted using the \Robject{name} argument to \Rfunction{results},
-where the name is one of the elements returned by \Robject{resultsNames(dds)}.
-
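-For example, a sketch of extracting a result by name (the element name shown
-here is a hypothetical placeholder; use one of the names actually printed by
-\Robject{resultsNames(dds)}):
-
-<<nameArgExample, eval=FALSE>>=
-resultsNames(dds)
-# substitute one of the names printed above for this placeholder
-results(dds, name="condition_treated_vs_untreated")
-@
-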
-\newpage
-
-%---------------------------------------------------
-\section{Data transformations and visualization} \label{sec:transf}
-%---------------------------------------------------
-\subsection{Count data transformations}
-%---------------------------------------------------
-
-In order to test for differential expression, we operate on raw counts
-and use discrete distributions as described in the previous Section~\ref{sec:de}.
-However for other downstream analyses -- 
-e.g. for visualization or clustering -- it might be useful 
-to work with transformed versions of the count data. 
-
-Maybe the most obvious choice of transformation is the logarithm.
-Since count values for a gene can be zero in some
-conditions (and non-zero in others), some advocate the use of
-\emph{pseudocounts}, i.\,e.\ transformations of the form
-%
-\begin{equation}\label{eq:shiftedlog}
-  y = \log_2(n + 1)\quad\mbox{or more generally,}\quad y = \log_2(n + n_0),
-\end{equation}
-%
-where $n$ represents the count values and $n_0$ is a positive constant.
-
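-Such a shifted logarithm of the normalized counts can be computed directly, as
-in the following sketch with $n_0 = 1$; the convenience function
-\Rfunction{normTransform}, used later in Section~\ref{sec:hmc}, wraps the same
-$\log_2(x + 1)$ transformation.
-
-<<shiftedLogSketch, eval=FALSE>>=
-ntd <- log2(counts(dds, normalized=TRUE) + 1)
-@
-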
-In this section, we discuss two alternative
-approaches that offer more theoretical justification and a rational way
-of choosing the parameter equivalent to $n_0$ above.
-The first, the \emph{regularized logarithm} or \emph{rlog}, incorporates a prior on
-the sample differences \cite{Love2014}, 
-and the second uses the concept of variance stabilizing
-transformations (VST) \cite{Tibshirani1988,sagmb2003,Anders:2010:GB}.
-Both transformations produce transformed data on the $\log_2$ scale
-which has been normalized with respect to library size.
-
-The point of these two transformations, the \emph{rlog} and the VST,
-is to remove the dependence of the variance on the mean,
-particularly the high variance of the logarithm of count data when the
-mean is low. Both \emph{rlog} and VST use the experiment-wide trend
-of variance over mean, in order to transform the data to remove the
-experiment-wide trend. Note that we do not require or
-desire that all the genes have \emph{exactly} the same variance after
-transformation. Indeed, in Figure~\ref{fig:meansd} below, you will see
-that after the transformations the genes with the same mean do not
-have exactly the same standard deviations, but that the
-experiment-wide trend has flattened. It is those genes with row
-variance above the trend which will allow us to cluster samples into
-interesting groups.
-
-\textbf{Note on running time:} if you have many samples (e.g. 100s),
-the \Rfunction{rlog} function might take too long, and the 
-\Rfunction{varianceStabilizingTransformation} is a faster choice.  
-The rlog and VST have similar properties, but the rlog requires fitting a shrinkage
-term for each sample and each gene which takes time.  See the
-\deseqtwo{} paper for more discussion on the differences
-\cite{Love2014}. In addition, a new function \Rfunction{vst} provides
-an even faster version of the \Rfunction{varianceStabilizingTransformation},
-by calculating the global dispersion trend on a subset of the genes
-(by default 1000). \Rfunction{vst} may be attractive for interactive EDA.
-
-\subsubsection{Blind dispersion estimation}
-
-The two functions, \Rfunction{rlog} and
-\Rfunction{varianceStabilizingTransformation}, have an argument
-\Robject{blind}, for whether the transformation should be blind to the
-sample information specified by the design formula. When
-\Robject{blind} equals \Robject{TRUE} (the default), the functions
-will re-estimate the dispersions using only an intercept (design
-formula $\sim 1$). This setting should be used in order to compare
-samples in a manner wholly unbiased by the information about
-experimental groups, for example to perform sample QA (quality
-assurance) as demonstrated below.
-
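-For example, a sketch of a fully blind transformation for sample QA
-(\Robject{blind=TRUE} is the default, so the argument is shown only for
-emphasis):
-
-<<blindSketch, eval=FALSE>>=
-vsd.qa <- vst(dds, blind=TRUE)
-@
-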
-However, blind dispersion estimation is not the appropriate choice if
-one expects that many or the majority of genes (rows) will have large
-differences in counts which are explainable by the experimental design,
-and one wishes to transform the data for downstream analysis. In this
-case, using blind dispersion estimation will lead to large estimates
-of dispersion, as it attributes differences due to experimental design
-as unwanted ``noise'', and will result in overly shrinking the transformed
-values towards each other. 
-By setting \Robject{blind} to \Robject{FALSE}, the dispersions
-already estimated will be used to perform transformations, or if not
-present, they will be estimated using the current design formula. Note
-that only the fitted dispersion estimates from mean-dispersion trend
-line are used in the transformation (the global dependence of
-dispersion on mean for the entire experiment).
-So setting \Robject{blind} to \Robject{FALSE} is still for the most
-part not using the information about which samples were in which
-experimental group in applying the transformation.
-
-\subsubsection{Extracting transformed values}
-
-These functions return an object of class \Rclass{DESeqTransform}
-which is a subclass of \Rclass{RangedSummarizedExperiment}. 
-For $\sim 20$ samples, running on a newly created \Robject{DESeqDataSet},
-\Rfunction{rlog} may take 30 seconds, 
-\Rfunction{varianceStabilizingTransformation} may take 5 seconds, and
-\Rfunction{vst} less than 1 second (by subsetting to 1000 genes for
-calculating the global dispersion trend).
-However, the running times are shorter and more similar with \Rcode{blind=FALSE} and
-if the function \Rfunction{DESeq} has already been run, because then
-it is not necessary to re-estimate the dispersion values.
-The \Rfunction{assay} function is used to extract the matrix of normalized values.
-
-<<rlogAndVST>>=
-rld <- rlog(dds, blind=FALSE)
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-vsd.fast <- vst(dds, blind=FALSE)
-head(assay(rld), 3)
-@
-
-\subsubsection{Regularized log transformation}
-
-The function \Rfunction{rlog} stands for \emph{regularized log};
-it transforms the original count data to the log2 scale by fitting a
-model with a term for each sample and a prior distribution on the
-coefficients which is estimated from the data. This is the same kind
-of shrinkage (sometimes referred to as regularization, or moderation)
-of log fold changes used by the \Rfunction{DESeq} and
-\Rfunction{nbinomWaldTest}, as seen in Figure \ref{fig:MA}. The
-resulting data contains elements defined as:
-
-$$ \log_2(q_{ij}) = \beta_{i0} + \beta_{ij} $$
-
-where $q_{ij}$ is a parameter proportional to the expected true
-concentration of fragments for gene $i$ and sample $j$ (see
-Section~\ref{sec:glm}), $\beta_{i0}$ is an intercept which does not
-undergo shrinkage, and $\beta_{ij}$ is the sample-specific effect
-which is shrunk toward zero based on the dispersion-mean trend over
-the entire dataset. The trend typically captures high dispersions for
-low counts, and therefore these genes exhibit higher shrinkage from
-the \Rfunction{rlog}.
-
-Note that, as $q_{ij}$ represents the part of the mean value
-$\mu_{ij}$ after the size factor $s_j$ has been divided out, it is
-clear that the rlog transformation inherently accounts for differences
-in sequencing depth.  Without priors, this design matrix would lead to
-a non-unique solution, however the addition of a prior on
-non-intercept betas allows for a unique solution to be found.  The
-regularized log transformation is preferable to the variance
-stabilizing transformation if the size factors vary widely.
-
-\subsubsection{Variance stabilizing transformation}
-
-Above, we used a parametric fit for the dispersion. In this case, the
-closed-form expression for the variance stabilizing transformation is
-used by \Rfunction{varianceStabilizingTransformation}, which is
-derived in the file \texttt{vst.pdf}, that is distributed in the
-package alongside this vignette. If a local fit is used (option
-\Robject{fitType="local"} to \Rfunction{estimateDispersions}) a
-numerical integration is used instead.
-
-<<vsd1, echo=FALSE, fig.width=4.5, fig.height=4.5, fig.show="asis", fig.small=TRUE, fig.pos="!bt", fig.cap="VST and log2. Graphs of the variance stabilizing transformation for sample 1, in blue, and of the transformation $f(n) = \\log_2(n/s_1)$, in black. $n$ are the counts and $s_1$ is the size factor for the first sample.\\label{figure/vsd1-1}">>=
-px     <- counts(dds)[,1] / sizeFactors(dds)[1]
-ord    <- order(px)
-ord    <- ord[px[ord] < 150]
-ord    <- ord[seq(1, length(ord), length=50)]
-last   <- ord[length(ord)]
-vstcol <- c("blue", "black")
-matplot(px[ord],
-        cbind(assay(vsd)[, 1], log2(px))[ord, ],
-        type="l", lty=1, col=vstcol, xlab="n", ylab="f(n)")
-legend("bottomright",
-       legend = c(
-        expression("variance stabilizing transformation"),
-        expression(log[2](n/s[1]))),
-       fill=vstcol)
-@
-
-The resulting variance stabilizing transformation is shown in Figure
-\ref{figure/vsd1-1}.  The code that produces the figure is hidden from
-this vignette for the sake of brevity, but can be seen in the
-\texttt{.Rnw} or \texttt{.R} source file. Note that the vertical axis
-in such plots is the square root of the variance over all samples, and
-thus includes the variance due to the experimental conditions.  While a
-flat curve of the square root of variance over the mean may seem like
-the goal of such transformations, this may be unreasonable in the case
-of datasets with many true differences due to the experimental
-conditions.
-
-\subsubsection{Effects of transformations on the variance}
-
-Figure~\ref{fig:meansd} plots the standard deviation of the transformed
-data, across samples, against the mean, using the shifted
-logarithm transformation \eqref{eq:shiftedlog}, the
-regularized log transformation and the variance stabilizing transformation.
-The shifted logarithm has elevated standard deviation in the lower
-count range, and the regularized log to a lesser extent, while for
-the variance stabilized data the standard deviation is roughly constant
-along the whole dynamic range.
-
-<<meansd, fig.width=4, fig.height=3, fig.show="asis", fig.wide=TRUE, fig.pos="tb", out.width=".32\\linewidth", fig.cap="Per-gene standard deviation (taken across samples), against the rank of the mean, for {\\bfhelvet(a)} the shifted logarithm $\\log_2(n+1)$, {\\bfhelvet(b)} the regularized log transformation and {\\bfhelvet(c)} the variance stabilizing transformation.\\label{fig:meansd}", fig.subcap="">>=
-library("vsn")
-notAllZero <- (rowSums(counts(dds))>0)
-meanSdPlot(log2(counts(dds,normalized=TRUE)[notAllZero,] + 1))
-meanSdPlot(assay(rld[notAllZero,]))
-meanSdPlot(assay(vsd[notAllZero,]))
-@
-
-%---------------------------------------------------------------
-\subsection{Data quality assessment by sample clustering and visualization}\label{sec:quality}
-%---------------------------------------------------------------
-
-Data quality assessment and quality control (i.\,e.\ the removal of
-insufficiently good data) are essential steps of any data
-analysis. These steps should typically be performed 
-very early in the analysis of a new data set,
-preceding or in parallel to the differential expression testing.
-
-We define the term \emph{quality} as 
-\emph{fitness for purpose}\footnote{\url{http://en.wikipedia.org/wiki/Quality_\%28business\%29}}.
-Our purpose is the detection of differentially expressed genes, and we
-are looking in particular for samples whose experimental treatment
-suffered from an abnormality that renders the data points obtained from
-these particular samples detrimental to our purpose.
-
-\subsubsection{Heatmap of the count matrix}\label{sec:hmc}
-To explore a count matrix, it is often instructive to look at it as a
-heatmap.  Below we show how to produce such a heatmap 
-for various transformations of the data.
-
-<<heatmap, dev="pdf", fig.width=5, fig.height=7>>=
-library("pheatmap")
-select <- order(rowMeans(counts(dds,normalized=TRUE)),
-                decreasing=TRUE)[1:20]
-
-nt <- normTransform(dds) # defaults to log2(x+1)
-log2.norm.counts <- assay(nt)[select,]
-df <- as.data.frame(colData(dds)[,c("condition","type")])
-pheatmap(log2.norm.counts, cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-
-pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-
-pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-@
-
-\begin{figure*}
-\includegraphics[width=.32\textwidth]{figure/heatmap-1}
-\includegraphics[width=.32\textwidth]{figure/heatmap-2}
-\includegraphics[width=.32\textwidth]{figure/heatmap-3}
-\caption{Heatmaps showing the expression data of the \Sexpr{length(select)}
-  most highly expressed genes. The data is of log2 normalized counts (left),
-  from regularized log transformation (center) and from variance
-  stabilizing transformation (right).}
-\label{fig:heatmap2}
-\end{figure*}
-
-\subsubsection{Heatmap of the sample-to-sample distances}\label{sec:dists}
-
-Another use of the transformed data is sample clustering. Here, we apply the
-\Rfunction{dist} function to the transpose of the transformed count matrix to get
-sample-to-sample distances. We could alternatively use the variance stabilized
-transformation here.
-
-<<sampleClust>>=
-sampleDists <- dist(t(assay(rld)))
-@
-
-A heatmap of this distance matrix gives us an overview of similarities
-and dissimilarities between samples (Figure \ref{figure/figHeatmapSamples-1}).
-We have to provide the sample distances to the
-\Robject{clustering\_distance} arguments of the \Rfunction{pheatmap}
-function, or else the heatmap function would calculate a clustering
-based on the distances between the rows/columns of the distance matrix itself.
-
-<<figHeatmapSamples, dev="pdf", fig.width=7, fig.height=7, fig.show="asis", fig.small=TRUE, fig.pos="tb", fig.cap="Sample-to-sample distances.  Heatmap showing the Euclidean distances between the samples as calculated from the regularized log transformation.\\label{figure/figHeatmapSamples-1}">>=
-library("RColorBrewer")
-sampleDistMatrix <- as.matrix(sampleDists)
-rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
-colnames(sampleDistMatrix) <- NULL
-colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
-pheatmap(sampleDistMatrix,
-         clustering_distance_rows=sampleDists,
-         clustering_distance_cols=sampleDists,
-         col=colors)
-@
-
-\subsubsection{Principal component plot of the samples}\label{sec:pca}
-
-Related to the distance matrix of Section~\ref{sec:dists} is the PCA
-plot of the samples, which we obtain as follows (Figure \ref{figure/figPCA-1}).
-
-<<figPCA, dev="pdf", fig.width=5, fig.height=3>>=
-plotPCA(rld, intgroup=c("condition", "type"))
-@
-
-\incfig[tbh]{figure/figPCA-1}{\textwidth}{PCA plot.}{
-  PCA plot. The \Sexpr{ncol(rld)} samples shown in the 2D
-  plane spanned by their first two principal components. This type of
-  plot is useful for visualizing the overall effect of experimental
-  covariates and batch effects.
-}
-
-It is also possible to customize the PCA plot using the
-\Rfunction{ggplot} function.
-
-<<figPCA2, dev="pdf", fig.width=5, fig.height=3>>=
-data <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
-percentVar <- round(100 * attr(data, "percentVar"))
-ggplot(data, aes(PC1, PC2, color=condition, shape=type)) +
-  geom_point(size=3) +
-  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
-  ylab(paste0("PC2: ",percentVar[2],"% variance")) + 
-  coord_fixed()
-@
-
-\incfig[tbh]{figure/figPCA2-1}{\textwidth}{PCA plot.}{
-  PCA plot customized using the \CRANpkg{ggplot2} library.
-}
-
-
-\newpage
-
-%--------------------------------------------------
-\section{Variations to the standard workflow}
-%--------------------------------------------------
-
-\subsection{Wald test individual steps} \label{sec:steps}
-
-The function \Rfunction{DESeq} runs the following functions in order:
-
-<<WaldTest, eval=FALSE>>=
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
-dds <- nbinomWaldTest(dds)
-@
-
-\subsection{Contrasts} \label{sec:contrasts}
-
-A contrast is a linear combination of estimated log2 fold changes,
-which can be used to test if differences between groups are equal to
-zero.  The simplest use case for contrasts is an experimental design
-containing a factor with three levels, say A, B and C.  Contrasts
-enable the user to generate results for all 3 possible differences:
-log2 fold change of B vs A, of C vs A, and of C vs B.
-The \Robject{contrast} argument of \Rfunction{results} function is
-used to extract test results of log2 fold changes of interest, for example:
-
-<<simpleContrast, eval=FALSE>>=
-results(dds, contrast=c("condition","C","B"))
-@ 
-
-Log2 fold changes can also be added and subtracted by providing a
-\Robject{list} to the \Robject{contrast} argument which has two elements:
-the names of the log2 fold changes to add, and the names of the log2
-fold changes to subtract. The names used in the list should come from
-\Robject{resultsNames(dds)}.
-
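-For example, a sketch using hypothetical element names (the actual names must
-be taken from \Robject{resultsNames(dds)}):
-
-<<listContrast, eval=FALSE>>=
-# first element: names to add; second element: names to subtract
-results(dds, contrast=list("conditionC", "conditionB"))
-@
-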
-Alternatively, a numeric vector of the
-length of \Robject{resultsNames(dds)} can be provided, for manually
-specifying the linear combination of terms.  Demonstrations of the use
-of contrasts for various designs can be found in the examples section
-of the help page for the \Rfunction{results} function. The
-mathematical formula that is used to generate the contrasts can be found in
-Section~\ref{sec:ctrstTheory}.
-
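-For example, assuming \Robject{resultsNames(dds)} returns, in order, an
-intercept, the B vs A effect, and the C vs A effect, the C vs B comparison
-could be specified numerically as follows (a sketch under that assumed
-ordering):
-
-<<numericContrast, eval=FALSE>>=
-# (C vs A) minus (B vs A) equals (C vs B)
-results(dds, contrast=c(0, -1, 1))
-@
-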
-\subsection{Interactions} \label{sec:interactions}
-
-Interaction terms can be added to the design formula, in order to
-test, for example, if the log2 fold change attributable to a given
-condition is \textit{different} based on another factor, for example if the
-condition effect differs across genotype.
-
-Many users begin to add interaction terms to the design formula, when
-in fact a much simpler approach would give all the results tables that
-are desired. We will explain this approach first, because it is much
-simpler to perform.
-If the comparisons of interest are, for example, the effect
-of a condition for different sets of samples, a simpler approach than
-adding interaction terms explicitly to the design formula is to
-perform the following steps:
-
-\begin{enumerate}
-\item combine the factors of interest into a single factor with all
-  combinations of the original factors 
-\item change the design to include just this factor, e.g. \Robject{\lowtilde{} group}
-\end{enumerate}
-
-Using this design is similar to adding an interaction term, 
-in that it models multiple condition effects which
-can be easily extracted with \Rfunction{results}.
-Suppose we have two factors \Robject{genotype} (with values I, II, and III) 
-and \Robject{condition} (with values A and B), and we want to extract 
-the condition effect specifically for each genotype. We could use the
-following approach to obtain, e.g. the condition effect for genotype I: 
-
-<<combineFactors, eval=FALSE>>=
-dds$group <- factor(paste0(dds$genotype, dds$condition))
-design(dds) <- ~ group
-dds <- DESeq(dds)
-resultsNames(dds)
-results(dds, contrast=c("group", "IB", "IA"))
-@
-
-<<interFig, dev="pdf", fig.width=4, fig.height=3, echo=FALSE, results="hide">>=
-npg <- 20
-mu <- 2^c(8,10,9,11,10,12)
-cond <- rep(rep(c("A","B"),each=npg),3)
-geno <- rep(c("I","II","III"),each=2*npg)
-table(cond, geno)
-counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
-d <- data.frame(log2c=log2(counts+1), cond, geno)
-library(ggplot2)
-plotit <- function(d, title) {
-  ggplot(d, aes(x=cond, y=log2c, group=geno)) + 
-    geom_jitter(size=1.5, position = position_jitter(width=.15)) +
-    facet_wrap(~ geno) + 
-    stat_summary(fun.y=mean, geom="line", colour="red", size=0.8) + 
-    xlab("condition") + ylab("log2(counts+1)") + ggtitle(title)
-}
-plotit(d, "Gene 1") + ylim(7,13)
-lm(log2c ~ cond + geno + geno:cond, data=d)
-@ 
-
-<<interFig2, dev="pdf", fig.width=4, fig.height=3,  echo=FALSE, results="hide">>=
-mu[4] <- 2^12
-mu[6] <- 2^8
-counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
-d2 <- data.frame(log2c=log2(counts + 1), cond, geno)
-plotit(d2, "Gene 2") + ylim(7,13)
-lm(log2c ~ cond + geno + geno:cond, data=d2)
-@ 
-
-\begin{figure*}
-\includegraphics[width=.49\textwidth]{figure/interFig-1}
-\includegraphics[width=.49\textwidth]{figure/interFig2-1}
-\caption{
-  Genotype-specific condition effects.
-  Here, the y-axis represents $\log_2(\textrm{counts}+1)$, and each
-  group has 20 samples (black dots). A red line connects the mean of
-  the groups within each genotype.
-  On the left side (Gene 1), note that the condition effect is consistent
-  across genotypes. Although condition A has a different baseline for
-  I,II, and III, the condition effect is a log2 fold change of about 2
-  for each genotype.
-  Using a model with an interaction term \Robject{genotype:condition},
-  the interaction terms for genotype II and genotype III will be nearly 0.
-  On the right side (Gene 2), we can see that the condition effect is
-  not consistent across genotype. Here the main condition effect (the
-  effect for the reference genotype I) is again 2. However, this time
-  the interaction terms will be around 1 for genotype II and
-  -4 for genotype III. This is 
-  because the condition effect is higher by 1 for genotype II compared to
-  genotype I, and lower by 4 for genotype III compared to genotype I.
-  The condition effect for genotype II (or III) is obtained by adding the
-  main condition effect and the interaction term for that genotype.
-  Such a plot can be made using the \Rfunction{plotCounts} function
-  (Section~\ref{sec:plotcounts}).
-}
-\label{fig:inter}
-\end{figure*}
-
-Now we will continue to explain the use of interactions in order to
-test for \textit{differences} in condition effects. We continue with
-the example of condition effects across three genotypes (I, II, and III).
-For a diagram of how interactions might look across genotypes 
-please refer to Figure \ref{fig:inter}. 
-
-The key point to remember about designs with interaction terms is
-that, unlike for a design \Robject{\lowtilde{} 
-  genotype + condition}, where the condition effect represents the
-\textit{overall} effect controlling for differences due to genotype, by adding
-\Robject{genotype:condition}, the main condition effect only
-represents the effect of condition for the \textit{reference level} of
-genotype (I, or whichever level was defined by the user as the
-reference level). The interaction terms \Robject{genotypeII.conditionB}
-and \Robject{genotypeIII.conditionB} give the \textit{difference}
-between the condition effect for a given genotype and the condition
-effect for the reference genotype. 
-
-This genotype-condition interaction example is examined in further
-detail in Example 3 in the help page for \Rfunction{results}, which
-can be found by typing \Rcode{?results}. In particular, we show how to
-test for differences in the condition effect across genotype, and we
-show how to obtain the condition effect for non-reference genotypes.
-Note that in \deseqtwo{} version 1.10, the \Rfunction{DESeq} function will turn
-off log fold change shrinkage (setting \Robject{betaPrior=FALSE}),
-for designs which contain an interaction term. Turning off the log
-fold change shrinkage allows the software to use standard model
-matrices (as would be produced by \Rfunction{model.matrix}), where the
-interaction coefficients are easier to interpret.
-
-\subsection{Time-series experiments}
-
-There are a number of ways to analyze time-series experiments,
-depending on the biological question of interest. In order to test for
-any differences over multiple time points, one can use a design
-including the time factor, and then test using the likelihood ratio
-test as described in Section~\ref{sec:LRT}, where the time factor is
-removed in the reduced formula. For a control and treatment time
-series, one can use a design formula containing the condition factor,
-the time factor, and the interaction of the two. In this case, using
-the likelihood ratio test with a reduced model which does not contain
-the interaction terms will test whether the condition induces a change
-in gene expression at any time point after the reference level time point
-(time 0). An example of the latter analysis is provided in an RNA-seq
-workflow on the Bioconductor
-website: \url{http://www.bioconductor.org/help/workflows/rnaseqGene/}.
-
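-For example, a sketch of the latter analysis, assuming factors named
-\Robject{condition} and \Robject{time}:
-
-<<timeSeriesSketch, eval=FALSE>>=
-design(dds) <- ~ condition + time + condition:time
-dds <- DESeq(dds, test="LRT", reduced = ~ condition + time)
-res <- results(dds)
-@
-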
-\subsection{Likelihood ratio test} \label{sec:LRT}
-
-\deseqtwo{} offers two kinds of hypothesis tests: the Wald test, where
-we use the estimated standard error of a log2 fold change to test if it is
-equal to zero, and the likelihood ratio test (LRT). The LRT examines
-two models for the counts, a \emph{full} model with a certain number
-of terms and a \emph{reduced} model, in which some of the terms of the
-\emph{full} model are removed. The test determines if the increased
-likelihood of the data using the extra terms in the \emph{full} model
-is more than expected if those extra terms are truly zero.
-
-The LRT is therefore useful for testing multiple
-terms at once, for example testing 3 or more levels of a factor at once,
-or all interactions between two variables. 
-The LRT for count data is conceptually similar to an analysis of variance (ANOVA)
-calculation in linear regression, except that in the case of the Negative
-Binomial GLM, we use an analysis of deviance (ANODEV), where the
-\emph{deviance} captures the difference in likelihood between a full
-and a reduced model.
-
-The likelihood ratio test can be performed by specifying \Rcode{test="LRT"}
-when using the \Rfunction{DESeq} function, and
-providing a reduced design formula, e.g. one in which a
-number of terms from \Robject{design(dds)} are removed.
-The degrees of freedom for the test are obtained from the difference
-between the number of parameters in the two models. 
-A simple likelihood ratio test, if the full design was
-\Robject{~condition} would look like:
-
-<<simpleLRT, eval=FALSE>>=
-dds <- DESeq(dds, test="LRT", reduced=~1)
-res <- results(dds)
-@ 
-
-If the full design contained other variables, 
-such as a batch variable,
-then the likelihood ratio test would look like:
-
-<<simpleLRT2, eval=FALSE>>=
-dds <- DESeq(dds, test="LRT", reduced=~batch)
-res <- results(dds)
-@ 
-
-\subsection{Approach to count outliers} \label{sec:outlierApproach}
-
-RNA-seq data sometimes contain isolated instances of very large counts that are apparently
-unrelated to the experimental or study design, and which may be 
-considered outliers. There are many reasons why outliers can arise, including rare
-technical or experimental artifacts, read mapping problems in the case of genetically
-differing samples, and genuine, but rare biological events. In many cases, users appear
-primarily interested in genes that show a consistent behavior, and this is the reason why
-by default, genes that are affected by such outliers are set aside by \deseqtwo{}, 
-or if there are sufficient samples, outlier counts are replaced for model fitting. 
-These two behaviors are described below.
-
-The \Rfunction{DESeq} function calculates, for every gene and for every sample,
-a diagnostic test for outliers called \emph{Cook's distance}. Cook's distance 
-is a measure of how much a single sample is influencing the fitted 
-coefficients for a gene, and a large value of Cook's distance is 
-intended to indicate an outlier count. 
-The Cook's distances are stored as a matrix available in 
-\Robject{assays(dds)[["cooks"]]}.
-
-The \Rfunction{results} function automatically flags genes which contain a 
-Cook's distance above a cutoff for samples which have 3 or more replicates. 
-The $p$ values and adjusted $p$ values for these genes are set to \Robject{NA}. 
-At least 3 replicates are required for flagging, as it is difficult to judge
-which sample might be an outlier with only 2 replicates.
-This filtering can be turned off with \Rcode{results(dds, cooksCutoff=FALSE)}.
-
-With many degrees of freedom -- i.\,e., many more samples than number of parameters to 
-be estimated -- it is undesirable to remove entire genes from the analysis
-just because their data include a single count outlier. When there
-are 7 or more replicates for a given sample, the \Rfunction{DESeq}
-function will automatically replace counts with large Cook's distance 
-with the trimmed mean over all samples, scaled up by the size factor or 
-normalization factor for that sample. This approach is conservative;
-it will not lead to false positives, as it replaces
-the outlier value with the value predicted by the null hypothesis.
-This outlier replacement only occurs when there are 7 or more
-replicates, and can be turned off with 
-\Rcode{DESeq(dds, minReplicatesForReplace=Inf)}.
-
-The default Cook's distance cutoff for the two behaviors described above
-depends on the sample size and number of parameters
-to be estimated. The default is to use the $99\%$ quantile of the 
-$F(p,m-p)$ distribution (with $p$ the number of parameters including the 
-intercept and $m$ the number of samples).
-The default for gene flagging can be modified using the \Robject{cooksCutoff} 
-argument to the \Rfunction{results} function. 
-For outlier replacement, \Rfunction{DESeq} preserves the original counts in
-\Robject{counts(dds)} saving the replacement counts as a matrix named
-\Robject{replaceCounts} in \Robject{assays(dds)}.
-Note that with continuous variables in the design, outlier detection
-and replacement is not automatically performed, as our 
-current methods involve a robust estimation of within-group variance
-which does not extend easily to continuous covariates. However, users
-can examine the Cook's distances in \Rcode{assays(dds)[["cooks"]]}, in
-order to perform manual visualization and filtering if necessary.
-
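-The original and replacement counts can be inspected side by side; a sketch
-(the \Robject{replaceCounts} matrix is only present if replacement occurred):
-
-<<replaceCountsSketch, eval=FALSE>>=
-head(counts(dds))
-head(assays(dds)[["replaceCounts"]])
-@
-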
-\textbf{Note on many outliers:} if there are very many outliers 
-(e.g. many hundreds or thousands) reported by
-\Rcode{summary(res)}, one might consider further exploration to see if
-a single sample or a few samples should be removed due to low quality. 
-The automatic outlier filtering/replacement is most useful in situations in which the number
-of outliers is limited. When there are thousands of reported outliers, 
-it might make more sense to turn off the outlier filtering/replacement
-(\Rfunction{DESeq} with \Robject{minReplicatesForReplace=Inf} and
-\Rfunction{results} with \Robject{cooksCutoff=FALSE})
-and perform manual inspection: First it would be
-advantageous to make a PCA plot using the code example in Section
-\ref{sec:pca} to spot individual sample outliers; Second, one can make
-a boxplot of the Cook's distances to see if one sample is consistently
-higher than others: 
-
-<<boxplotCooks, fig.show="asis", fig.small=TRUE, fig.cap="Boxplot of Cook's distances.  Here we can look to see if one sample has much higher Cook's distances than the other samples. In this case, the samples all have comparable range of Cook's distances.\\label{figure/boxplotCooks-1}">>=
-par(mar=c(8,5,2,2))
-boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
-@ 
-
-\subsection{Dispersion plot and fitting alternatives}
-
-Plotting the dispersion estimates is a useful diagnostic. The dispersion
-plot in Figure \ref{figure/dispFit-1} is typical, with the final estimates shrunk
-from the gene-wise estimates towards the fitted estimates. Some gene-wise
-estimates are flagged as outliers and not shrunk towards the fitted value
-(this outlier detection is described in the manual page for \Rfunction{estimateDispersionsMAP}).
-The amount of shrinkage can be more or less than seen here, depending 
-on the sample size, the number of coefficients, the row mean
-and the variability of the gene-wise estimates.
-
-<<dispFit, fig.show="asis", fig.small=TRUE, fig.cap="Dispersion plot.  The dispersion estimate plot shows the gene-wise estimates (black), the fitted values (red), and the final maximum \\textit{a posteriori} estimates used in testing (blue).\\label{figure/dispFit-1}">>=
-plotDispEsts(dds)
-@
-
-\subsubsection{Local or mean dispersion fit}
-
-A local smoothed dispersion fit is automatically substituted in the case that
-the parametric curve does not fit the observed dispersion-mean relationship.
-This can be prespecified by providing the argument
-\Robject{fitType="local"} to either \Rfunction{DESeq} or \Rfunction{estimateDispersions}.
-Additionally, using the mean of gene-wise dispersion estimates as the
-fitted value can be specified by providing the argument \Robject{fitType="mean"}. 
-
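-A sketch of prespecifying either alternative:
-
-<<fitTypeSketch, eval=FALSE>>=
-dds <- estimateDispersions(dds, fitType="local")
-# or, using the mean of the gene-wise estimates as the fitted value:
-dds <- estimateDispersions(dds, fitType="mean")
-@
-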
-\subsubsection{Supply a custom dispersion fit}
-
-Any fitted values can be provided during dispersion estimation, using
-the lower-level functions described in the manual page for
-\Rfunction{estimateDispersionsGeneEst}. In the code chunk below, we
-store the gene-wise estimates which were already calculated and saved 
-in the metadata column \Robject{dispGeneEst}. Then we calculate the
-median value of the dispersion estimates above a threshold, and save
-these values as the fitted dispersions, using the replacement function
-for \Rfunction{dispersionFunction}. In the last line, the function
-\Rfunction{estimateDispersionsMAP} uses the 
-fitted dispersions to generate maximum \textit{a posteriori} (MAP)
-estimates of dispersion. 
-
-<<dispFitCustom>>=
-ddsCustom <- dds
-useForMedian <- mcols(ddsCustom)$dispGeneEst > 1e-7
-medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
-                     na.rm=TRUE)
-dispersionFunction(ddsCustom) <- function(mu) medianDisp
-ddsCustom <- estimateDispersionsMAP(ddsCustom)
-@
-
-
-\subsection{Independent filtering of results}\label{sec:autoFilt}
-
-The \Rfunction{results} function of the \deseqtwo{} package 
-performs independent filtering by default using 
-the mean of normalized counts as a filter statistic. 
-A threshold on the filter statistic is found which optimizes the number
-of adjusted $p$ values lower than a significance level \Robject{alpha}
-(we use the standard variable name for significance level, 
-though it is unrelated to the dispersion parameter $\alpha$). 
-The theory behind independent filtering is discussed in greater detail
-in Section~\ref{sec:indepfilt}. The adjusted $p$ values for the genes
-which do not pass the filter threshold are set to \Robject{NA}.
-
-The independent filtering is performed using the \Rfunction{filtered\_p} function 
-of the \Biocpkg{genefilter} package, and all of the arguments of \Rfunction{filtered\_p}
-can be passed to the \Rfunction{results} function. 
-The filter threshold value and the number of rejections at each quantile
-of the filter statistic are available as metadata of the object 
-returned by \Rfunction{results}. For example, we can visualize
-the optimization by plotting the \Robject{filterNumRej} attribute of 
-the results object, as seen in Figure \ref{figure/filtByMean-1}.
-
-<<filtByMean, dev="pdf", fig.show="asis", fig.small=TRUE, fig.cap="Independent filtering.  The \\Rfunction{results} function maximizes the number of rejections (adjusted $p$ value less than a significance level), over the quantiles of a filter statistic (the mean of normalized counts). The threshold chosen (vertical line) is the lowest quantile of the filter for which the number of rejections is within 1 residual standard deviation to the peak of a curve fit to the number of rejections over the filter quantiles.\\label{figure/filtByMean-1}">>=
-metadata(res)$alpha
-metadata(res)$filterThreshold
-plot(metadata(res)$filterNumRej, 
-     type="b", ylab="number of rejections",
-     xlab="quantiles of filter")
-lines(metadata(res)$lo.fit, col="red")
-abline(v=metadata(res)$filterTheta)
-@
-
-Independent filtering can be turned off by setting 
-\Robject{independentFiltering} to \Robject{FALSE}.
-
-<<noFilt>>=
-resNoFilt <- results(dds, independentFiltering=FALSE)
-addmargins(table(filtering=(res$padj < .1),
-                 noFiltering=(resNoFilt$padj < .1)))
-@ 
-
-\subsection{Tests of log2 fold change above or below a threshold}
-
-It is also possible to provide thresholds for constructing
-Wald tests of significance. Two arguments to the \Rfunction{results}
-function allow for threshold-based Wald tests: \Robject{lfcThreshold},
-which takes a non-negative numeric threshold value, 
-and \Robject{altHypothesis}, which specifies the kind of test.
-Note that the \textit{alternative hypothesis} is specified by the user, 
-i.e. those genes which the user is interested in finding, and the test 
-provides $p$ values for the null hypothesis, the complement of the set 
-defined by the alternative. The \Robject{altHypothesis} argument can take one 
-of the following four values, where $\beta$ is the log2 fold change
-specified by the \Robject{name} argument:
-
-\begin{itemize}
- \item \Robject{greaterAbs} - $|\beta| > \textrm{lfcThreshold}$ - tests are two-tailed
- \item \Robject{lessAbs} - $|\beta| < \textrm{lfcThreshold}$ - $p$ values are the maximum of the upper and lower tests
- \item \Robject{greater} - $\beta > \textrm{lfcThreshold} $
- \item \Robject{less} - $\beta < -\textrm{lfcThreshold} $
-\end{itemize}
-
-The test \Robject{altHypothesis="lessAbs"} requires that the user have
-run \Rfunction{DESeq} with the argument \Robject{betaPrior=FALSE}.  To
-understand the reason for this requirement, consider that during
-hypothesis testing, the null hypothesis is favored unless the data
-provide strong evidence to reject the null.  For this test, including
-a zero-centered prior on log fold change would favor the alternative
-hypothesis, shrinking log fold changes toward zero.  Removing the
-prior on log fold changes for tests of small log fold change allows
-for detection of only those genes where the data alone provides
-evidence against the null.
-
-The four possible values of \Robject{altHypothesis} are demonstrated
-in the following code and visually by MA-plots in Figure~\ref{figure/lfcThresh-1}. 
-First we run \Rfunction{DESeq} and specify \Robject{betaPrior=FALSE} in order 
-to demonstrate \Robject{altHypothesis="lessAbs"}.
-
-<<ddsNoPrior>>=
-ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
-@
-
-In order to produce results tables for the following tests, the same arguments
-(except \Robject{ylim}) would be provided to the \Rfunction{results} function. 
-
-<<lfcThresh, fig.show="asis", fig.cap='MA-plots of tests of log2 fold change with respect to a threshold value.  Going left to right across rows, the tests are for \\Robject{altHypothesis = "greaterAbs"}, \\Robject{"lessAbs"}, \\Robject{"greater"}, and \\Robject{"less"}.\\label{figure/lfcThresh-1}'>>=
-par(mfrow=c(2,2),mar=c(2,2,1,1))
-yl <- c(-2.5,2.5)
-
-resGA <- results(dds, lfcThreshold=.5, altHypothesis="greaterAbs")
-resLA <- results(ddsNoPrior, lfcThreshold=.5, altHypothesis="lessAbs")
-resG <- results(dds, lfcThreshold=.5, altHypothesis="greater")
-resL <- results(dds, lfcThreshold=.5, altHypothesis="less")
-
-plotMA(resGA, ylim=yl)
-abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
-plotMA(resLA, ylim=yl)
-abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
-plotMA(resG, ylim=yl)
-abline(h=.5,col="dodgerblue",lwd=2)
-plotMA(resL, ylim=yl)
-abline(h=-.5,col="dodgerblue",lwd=2)
-@ 
-
-\subsection{Access to all calculated values}\label{sec:access}
-
-All row-wise calculated values (intermediate dispersion calculations,
-coefficients, standard errors, etc.) are stored in the \Rclass{DESeqDataSet} 
-object, e.g. \Robject{dds} in this vignette. These values are accessible 
-by calling \Rfunction{mcols} on \Robject{dds}. 
-Descriptions of the columns are accessible by two calls to 
-\Rfunction{mcols}.
-
-<<mcols>>=
-mcols(dds,use.names=TRUE)[1:4,1:4]
-# here using substr() only for display purposes
-substr(names(mcols(dds)),1,10) 
-mcols(mcols(dds), use.names=TRUE)[1:4,]
-@
-
-The mean values $\mu_{ij} = s_j q_{ij}$ and the Cook's distances for each gene and
-sample are stored as matrices in the assays slot:
-
-<<muAndCooks>>=
-head(assays(dds)[["mu"]])
-head(assays(dds)[["cooks"]])
-@ 
-
-The dispersions $\alpha_i$ can be accessed with the
-\Rfunction{dispersions} function.
-
-<<dispersions>>=
-head(dispersions(dds))
-# which is the same as 
-head(mcols(dds)$dispersion)
-@ 
-
-The size factors $s_j$ are accessible via \Rfunction{sizeFactors}:
-
-<<sizefactors>>=
-sizeFactors(dds)
-@ 
-
-For advanced users, we also include a convenience function \Rfunction{coef} for 
-extracting the matrix of coefficients $[\beta_{ir}]$ for all genes $i$ and
-parameters $r$, as in the formula in Section~\ref{sec:glm}.
-This function can also return a matrix of standard errors, see \Robject{?coef}.
-The columns of this matrix correspond to the effects returned by \Rfunction{resultsNames}.
-Note that the \Rfunction{results} function is best for building 
-results tables with $p$ values and adjusted $p$ values.
-
-<<coef>>=
-head(coef(dds))
-@ 
-
-The beta prior variance $\sigma_r^2$ is stored as an attribute of the
-\Rclass{DESeqDataSet}: 
-
-<<betaPriorVar>>=
-attr(dds, "betaPriorVar")
-@ 
-
-The dispersion prior variance $\sigma_d^2$ is stored as an
-attribute of the dispersion function:
-
-<<dispPriorVar>>=
-dispersionFunction(dds)
-attr(dispersionFunction(dds), "dispPriorVar")
-@ 
-
-The version of \deseqtwo{} which was used to construct the
-\Rclass{DESeqDataSet} object, or the version used when
-\Rfunction{DESeq} was run, is stored here:
-
-<<versionNum>>=
-metadata(dds)[["version"]]
-@ 
-
-\subsection{Sample-/gene-dependent normalization factors} \label{sec:normfactors}
-
-In some experiments, there might be gene-dependent dependencies
-which vary across samples. For instance, GC-content bias or length
-bias might vary across samples coming from different labs or
-processed at different times. We use the terms ``normalization factors''
-for a gene $\times$ sample matrix, and ``size factors'' for a
-single number per sample.  Incorporating normalization factors,
-the mean parameter $\mu_{ij}$ from Section~\ref{sec:glm} becomes:
-
-$$ \mu_{ij} = NF_{ij} q_{ij} $$
-
-with normalization factor matrix $NF$ having the same dimensions
-as the counts matrix $K$. This matrix can be incorporated as shown
-below. We recommend providing a matrix with row-wise geometric means of $1$, 
-so that the mean of normalized counts for a gene is close to the mean
-of the unnormalized counts.
-This can be accomplished by dividing out the current row geometric means.
-
-<<normFactors, eval=FALSE>>=
-normFactors <- normFactors / exp(rowMeans(log(normFactors)))
-normalizationFactors(dds) <- normFactors
-@
-
-These steps then replace \Rfunction{estimateSizeFactors} in the steps
-described in Section~\ref{sec:steps}. Normalization factors, if present,
-will always be used in the place of size factors.
-
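-A sketch of the remaining steps with normalization factors in place,
-continuing from the \Robject{normFactors} assignment above:
-
-<<normFactorSteps, eval=FALSE>>=
-dds <- estimateDispersions(dds)
-dds <- nbinomWaldTest(dds)
-@
-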
-The methods provided by the \Biocpkg{cqn} or \Biocpkg{EDASeq} packages
-can help correct for GC or length biases. They both describe in their
-vignettes how to create matrices which can be used by \deseqtwo{}.
-From the formula above, we see that normalization factors should be on
-the scale of the counts, like size factors, and unlike offsets which
-are typically on the scale of the predictors (i.e. the logarithmic scale for
-the negative binomial GLM). At the time of writing, the transformation
-from the matrices provided by these packages should be:
-
-<<offsetTransform, eval=FALSE>>=
-cqnOffset <- cqnObject$glm.offset
-cqnNormFactors <- exp(cqnOffset)
-EDASeqNormFactors <- exp(-1 * EDASeqOffset)
-@
-
-\subsection{``Model matrix not full rank''}
-
-While most experimental designs run easily using design formula, some
-design formulas can cause problems and result in the \Rfunction{DESeq}
-function returning an error with the text: ``the model matrix is not
-full rank, so the model cannot be fit as specified.''  There are two
-main reasons for this problem: either one or more columns in the model
-matrix are linear combinations of other columns, or there are levels
-of factors or combinations of levels of multiple factors which are
-missing samples. We address these two problems below and discuss
-possible solutions:
-
-\subsubsection{Linear combinations}
-
-The simplest case is the linear combination, or linear dependency
-problem, when two variables contain exactly the same information, such
-as in the following sample table. The software cannot fit an effect
-for \Robject{batch} and \Robject{condition}, because they produce
-identical columns in the model matrix. This is also referred to as
-``perfect confounding''. A unique solution of coefficients (the $\beta_i$ in
-the formula in Section~\ref{sec:glm}) is not possible.
-
-<<lineardep, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
-@ 
-
-Another situation which will cause problems is when the variables are
-not identical, but one variable can be formed by the combination of
-other factor levels. In the following example, the effect of batch 2
-vs 1 cannot be fit because it is identical to a column in the model
-matrix which represents the condition C vs A effect.
-
-<<lineardep2, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
-@ 
-
-In both of these cases above, the batch effect cannot be fit and must
-be removed from the model formula. There is just no way to tell apart
-the condition effects and the batch effects. The options are either to assume
-there is no batch effect (which we know is highly unlikely given the
-literature on batch effects in sequencing datasets) or to repeat the
-experiment and properly balance the conditions across batches.
-A balanced design would look like:
-
-<<lineardep3, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
-@ 
-
-Finally, there is a case where we can in fact perform inference.
-Consider an experiment with grouped individuals,
-where we seek to test the group-specific effect of a treatment, while
-controlling for individual effects. A simple example of such a design is:
-
-<<groupeffect>>=
-(coldata <- data.frame(grp=factor(rep(c("X","Y"),each=4)),
-                       ind=factor(rep(1:4,each=2)),
-                       cnd=factor(rep(c("A","B"),4))))
-@
-
-
-This design can be analyzed by \deseqtwo{} but requires a bit of
-refactoring in order to fit the model terms. Here we will use a trick
-described in the \Biocpkg{edgeR} user guide, from the section
-``Comparisons Both Between and Within Subjects''.  If we try to
-analyze with a formula such as \Rcode{$\sim$ ind + grp*cnd}, we will
-obtain an error, because the effect for group is a linear combination
-of the individuals.
-
-However, the following steps allow for an analysis of group-specific
-condition effects, while controlling for differences in individual.
-For object construction, use a dummy design, such as \Rcode{$\sim$
-  1}. Then add a column \Robject{ind.n} which distinguishes the
-individuals ``nested'' within a group. Here, we add this column to
-coldata, but in practice you would add this column to \Rcode{dds}.
-
-<<groupeffect2>>=
-coldata$ind.n <- factor(rep(rep(1:2,each=2),2))
-coldata
-@ 
-
-Now we can reassign our \Rclass{DESeqDataSet} a design of
-\Rcode{$\sim$ grp + grp:ind.n + grp:cnd}, before we call
-\Rfunction{DESeq}. This new design will result in the following model
-matrix: 
-
-<<groupeffect3>>=
-model.matrix(~ grp + grp:ind.n + grp:cnd, coldata)
-@ 
-
-where the terms \Robject{grpX.cndB} and \Robject{grpY.cndB} give the
-group-specific condition effects. These can be extracted using
-\Rfunction{results} with the \Robject{name} argument.
-Furthermore, \Robject{grpX.cndB} and
-\Robject{grpY.cndB} can be contrasted using the \Robject{contrast}
-argument, in order to test if the condition effect is different across group:
-
-<<groupeffect4, eval=FALSE>>=
-results(dds, contrast=list("grpY.cndB","grpX.cndB"))
-@ 
-
-\subsubsection{Levels without samples}
-
-The base R function for creating model matrices will produce a column
-of zeros if a level is missing from a factor or a combination of
-levels is missing from an interaction of factors. The solution to the
-first case is to call \Rfunction{droplevels} on the column, which will
-remove levels without samples. This was shown in the beginning of this
-vignette.
-
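-A sketch of dropping unused levels from a factor column:
-
-<<droplevelsSketch, eval=FALSE>>=
-dds$condition <- droplevels(dds$condition)
-@
-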
-The second case is also solvable, by manually editing the model
-matrix, and then providing this to \Rfunction{DESeq}. Here we
-construct an example dataset to illustrate:
-
-<<missingcombo>>=
-group <- factor(rep(1:3,each=6))
-condition <- factor(rep(rep(c("A","B","C"),each=2),3))
-(d <- data.frame(group, condition)[-c(17,18),])
-@ 
-
-Note that if we try to estimate all interaction terms, we introduce a
-column with all zeros, as there are no condition C samples for group
-3. (Here, \Rfunction{unname} is used to display the matrix concisely.)
-
-<<missingcombo2>>=
-m1 <- model.matrix(~ condition*group, d)
-colnames(m1)
-unname(m1)
-@ 
-
-We can remove this column like so:
-
-<<missingcombo3>>=
-m1 <- m1[,-9]
-unname(m1)
-@ 
-
-Now this matrix \Robject{m1} can be provided to the \Robject{full}
-argument of \Rfunction{DESeq}.  For a likelihood ratio test of
-interactions, a model matrix using a reduced design such as
-\Rcode{$\sim$ condition + group} can be given to the \Robject{reduced}
-argument. Wald tests can also be generated instead of the likelihood
-ratio test, but for user-supplied model matrices, the argument
-\Robject{betaPrior} must be set to \Robject{FALSE}.
-
-\newpage
-
-%--------------------------------------------------
-\section{Theory behind DESeq2}
-%--------------------------------------------------
-  
-\subsection{The DESeq2 model} \label{sec:glm}
-
-The \deseqtwo{} model and all the steps taken in the software
-are described in detail in our publication \cite{Love2014},
-and we include the formula and descriptions in this section as well.
-The differential expression analysis in \deseqtwo{} uses a generalized
-linear model of the form:
-
-$$ K_{ij} \sim \textrm{NB}(\mu_{ij}, \alpha_i) $$
-$$ \mu_{ij} = s_j q_{ij} $$
-$$ \log_2(q_{ij}) = x_{j.} \beta_i $$
-
-where counts $K_{ij}$ for gene $i$, sample $j$ are modeled using
-a negative binomial distribution with fitted mean $\mu_{ij}$
-and a gene-specific dispersion parameter $\alpha_i$.
-The fitted mean is composed of a sample-specific size factor
-$s_j$\footnote{The model can be generalized to use sample- 
-\textbf{and} gene-dependent normalization factors, see
-Section~\ref{sec:normfactors}.} and a parameter $q_{ij}$ 
-proportional to the expected true concentration of fragments for sample $j$.
-The coefficients $\beta_i$ give the log2 fold changes for gene $i$ for each 
-column of the model matrix $X$. 
-
-The dispersion parameter $\alpha_i$ defines the relationship between
-the variance of the observed count and its mean value. In other
-words, it describes how far we expect the observed count to be from the
-mean value, which depends both on the size factor $s_j$ and the
-covariate-dependent part $q_{ij}$ as defined above.
-
-$$ \textrm{Var}(K_{ij}) = E[ (K_{ij} - \mu_{ij})^2 ] = \mu_{ij} + \alpha_i \mu_{ij}^2 $$
-
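-As a small illustration of this mean-variance relationship, the quadratic term
-dominates for large counts (a sketch with arbitrary example values):
-
-<<nbVarSketch, eval=FALSE>>=
-nbVar <- function(mu, alpha) mu + alpha * mu^2
-nbVar(mu=c(10, 1000), alpha=0.05)
-@
-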
-The log2 fold changes in $\beta_i$ are the maximum \emph{a posteriori}
-estimates after incorporating a 
-zero-centered Normal prior -- in the software referred to as a $\beta$-prior -- hence \deseqtwo{}
-provides ``moderated'' log2 fold change estimates.  Dispersions are estimated using expected mean
-values from the maximum likelihood estimate of log2 fold changes, and optimizing the Cox-Reid
-adjusted profile likelihood, as first implemented for RNA-seq data in \Biocpkg{edgeR}
-\cite{CR,edgeR_GLM}. The steps performed by the \Rfunction{DESeq} function are documented in its
-manual page; briefly, they are:
-
-\begin{enumerate}
-\item estimation of size factors $s_j$ by \Rfunction{estimateSizeFactors}
-\item estimation of dispersion $\alpha_i$ by \Rfunction{estimateDispersions}
-\item negative binomial GLM fitting for $\beta_i$ and Wald statistics by 
-\Rfunction{nbinomWaldTest}
-\end{enumerate}
-
-For access to all the values calculated during these steps,
-see Section~\ref{sec:access}.
-
-\subsection{Changes compared to the  \Biocpkg{DESeq} package}
-
-The main changes in the package \deseqtwo{}, compared to the (older)
-version \Biocpkg{DESeq}, are as follows:
-
-\begin{itemize}
-\item \Rclass{RangedSummarizedExperiment} is used as the superclass for storage of input data,
-  intermediate calculations and results.
-\item Maximum \textit{a posteriori} estimation of GLM coefficients
-  incorporating a zero-centered
-  Normal prior with variance estimated from data (equivalent to Tikhonov/ridge
-  regularization). This adjustment has little effect on genes with high counts, yet it
-  helps to moderate the otherwise large variance in log2 fold change estimates
-  for genes with low counts or highly variable counts.
-\item Maximum \textit{a posteriori} estimation of dispersion replaces the
-  \Robject{sharingMode} options \Robject{fit-only} or \Robject{maximum} of the previous version
-  of the package. This is similar to the dispersion estimation methods of DSS \cite{Wu2012New}.
-\item All estimation and inference is based on the generalized linear model, which
-  includes the two condition case (previously the \textit{exact test} was used).
-\item The Wald test for significance of GLM coefficients is provided as the default
-  inference method, with the likelihood ratio test of the previous version still available.
-\item It is possible to provide a matrix of sample-/gene-dependent
-  normalization factors (Section \ref{sec:normfactors}).
-\item Automatic independent filtering on the mean of normalized counts
-  (Section \ref{sec:indepfilt}).
-\item Automatic outlier detection and handling (Section \ref{sec:cooks}).
-\end{itemize}
-
-\subsection{Methods changes since the 2014 DESeq2 paper}
-
-\begin{itemize}
-  \item For the calculation of the beta prior variance, instead of
-    matching the empirical quantile to the quantile of a Normal
-    distribution, \deseqtwo{} now uses the weighted quantile function
-    of the \CRANpkg{Hmisc} package. The weighting is described in the
-    manual page for \Rfunction{nbinomWaldTest}.  The weights are the
-    inverse of the expected variance of log counts (as used in the
-    diagonals of the matrix $W$ in the GLM). The effect of the change
-    is that the estimated prior variance is robust against noisy
-    estimates of log fold change from genes with very small
-    counts. This change was introduced in version 1.6 (October 2014).
-  \item For designs with interaction terms, the solution described in
-    the paper is no longer used (log fold change shrinkage only
-    applied to interaction terms). Instead, \deseqtwo{} now turns off
-    log fold change shrinkage for all terms if an interaction term is
-    present (\Robject{betaPrior=FALSE}).  While the inference on
-    interaction terms was correct with \Robject{betaPrior=TRUE}, the
-    interpretation of the individual terms and the extraction of
-    contrasts was too confusing.  This change was introduced in version 1.10
-    (October 2015).
-  \item A small change to the independent filtering routine: instead
-    of taking the quantile of the filter (the mean of normalized counts) which
-    directly \textit{maximizes} the number of rejections, the threshold chosen is 
-    the lowest quantile of the filter for which the
-    number of rejections is close to the peak of a curve fit
-    to the number of rejections over the filter quantiles.
-    ``Close to'' is defined as within 1 residual standard deviation.
-    This change was introduced in version 1.10 (October 2015).
-\end{itemize}
-
-For a list of all changes since version 1.0.0, see the NEWS file
-included in the package.
-
-\subsection{Count outlier detection} \label{sec:cooks}
-
-\deseqtwo{} relies on the negative binomial distribution to make
-estimates and perform statistical inference on differences.  While the
-negative binomial is versatile in having a mean and dispersion
-parameter, extreme counts in individual samples might not fit well to
-the negative binomial. For this reason, we perform automatic detection
-of count outliers. We use Cook's distance, which is a measure of how
-much the fitted coefficients would change if an individual sample were
-removed \cite{Cook1977Detection}. For more on the implementation of 
-Cook's distance see Section~\ref{sec:outlierApproach} and the manual page
-for the \Rfunction{results} function. Below we plot the maximum value of
-Cook's distance for each row over the rank of the test statistic 
-to justify its use as a filtering criterion.
-
-<<cooksPlot, fig.show="asis", fig.small=TRUE, fig.cap="Cook's distance.  Plot of the maximum Cook's distance per gene over the rank of the Wald statistics for the condition. The two regions with small Cook's distances are genes with a single count in one sample. The horizontal line is the default cutoff used for 7 samples and 3 estimated parameters.\\label{figure/cooksPlot-1}">>=
-W <- res$stat
-maxCooks <- apply(assays(dds)[["cooks"]],1,max)
-idx <- !is.na(W)
-plot(rank(W[idx]), maxCooks[idx], xlab="rank of Wald statistic", 
-     ylab="maximum Cook's distance per gene",
-     ylim=c(0,5), cex=.4, col=rgb(0,0,0,.3))
-m <- ncol(dds)
-p <- 3
-abline(h=qf(.99, p, m - p))
-@ 
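-
-As a minimal follow-up sketch (not a step of the standard workflow),
-genes whose maximum Cook's distance exceeds this default cutoff could
-be flagged directly, reusing the \Robject{maxCooks}, \Robject{m} and
-\Robject{p} values computed above:
-
-<<cooksFlag, eval=FALSE>>=
-cooksCutoff <- qf(.99, p, m - p)         # same default cutoff as in the plot
-flagged <- which(maxCooks > cooksCutoff) # indices of genes above the cutoff
-head(flagged)
-@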
-
-\subsection{Contrasts} \label{sec:ctrstTheory}
-
-Contrasts can be calculated for a \Rclass{DESeqDataSet} object for which
-the GLM coefficients have already been fit using the Wald test steps
-(\Rfunction{DESeq} with \texttt{test="Wald"} or using \Rfunction{nbinomWaldTest}).
-The vector of coefficients $\beta$ is left multiplied by the contrast vector $c$
-to form the numerator of the test statistic. The denominator is formed by
-pre- and post-multiplying the covariance matrix $\Sigma$ of the coefficients
-by the contrast vector $c$; the square root of this product is an estimate
-of the standard error for the contrast. The contrast statistic is then compared
-to a normal distribution, as are the other Wald statistics in the \deseqtwo{}
-package.
-
-$$ W = \frac{c^t \beta}{\sqrt{c^t \Sigma c}} $$
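-
-As a small numerical illustration (a sketch with made-up values, not
-the package's internal code), the same statistic can be computed by
-hand for a hypothetical coefficient vector and covariance matrix:
-
-<<contrastSketch, eval=FALSE>>=
-beta <- c(2.1, 0.5, -0.3)           # hypothetical GLM coefficients
-Sigma <- diag(c(0.04, 0.02, 0.03))  # hypothetical covariance matrix
-contr <- c(0, 1, -1)                # contrast: 2nd vs 3rd coefficient
-se <- sqrt(as.numeric(t(contr) %*% Sigma %*% contr))
-W <- sum(contr * beta) / se         # Wald statistic for the contrast
-pvalue <- 2 * pnorm(-abs(W))        # two-sided normal p value
-@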
-
-\subsection{Expanded model matrices} \label{sec:expanded}
-
-\deseqtwo{} uses ``expanded model matrices'' with the log2 fold change prior, 
-in order to produce shrunken log2 fold change estimates and test 
-results which are independent of the choice of reference level. 
-Another way of saying this is that the shrinkage is \textit{symmetric}
-with respect to all the levels of the factors in the design.
-The expanded model matrices differ from the standard model matrices in that
-they have an indicator column (and therefore a coefficient) for
-each level of each factor in the design formula, in addition to an intercept.
-Note that in version 1.10 and onward, standard model matrices are used for
-designs with interaction terms, as the shrinkage of log2 fold changes
-is not recommended for these designs.
-
-The expanded model matrices are not full rank, but a coefficient
-vector $\beta_i$ can still be found due to the zero-centered prior on
-non-intercept coefficients. The prior variance for the log2 fold
-changes is calculated by first generating maximum likelihood estimates
-for a standard model matrix. The prior variance for each level of a
-factor is then set as the average of the mean squared maximum
-likelihood estimates for each level and every possible contrast, such
-that this prior value will be reference-level-independent. The
-\Robject{contrast} argument of the \Rfunction{results} function is
-used to generate comparisons of interest.
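-
-To make the distinction concrete, the following sketch (plain R, not
-\deseqtwo{} internals) contrasts a standard model matrix with an
-expanded one for a single two-level factor:
-
-<<expandedSketch, eval=FALSE>>=
-condition <- factor(rep(c("A","B"), each=3))
-## standard: intercept plus an indicator for level B only
-model.matrix(~ condition)
-## expanded: intercept plus an indicator for every level
-cbind(Intercept=1,
-      conditionA=as.numeric(condition == "A"),
-      conditionB=as.numeric(condition == "B"))
-@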
-
-%--------------------------------------------------
-\subsection{Independent filtering and multiple testing} \label{sec:indepfilt}
-\subsubsection{Filtering criteria} \label{sec:filtbycount}
-%--------------------------------------------------
-
-The goal of independent filtering is to filter out those tests from the procedure
-that have little or no chance of showing significant evidence, without even
-looking at their test statistic. Typically, this results in increased detection
-power at the same experiment-wide type I error. Here, we measure experiment-wide
-type I error in terms of the false discovery rate.
-
-A good choice for a filtering criterion is one that
-\begin{enumerate}
-  \item\label{it:indp} is statistically independent from the test statistic under the null hypothesis,
-  \item\label{it:corr} is correlated with the test statistic under the alternative, and
-  \item\label{it:joint} does not notably change the dependence structure -- if there is any --
-    between the tests that pass the filter, compared to the dependence structure between the tests before filtering.
-\end{enumerate}
-
-The benefit from filtering relies on property \ref{it:corr}, and we will explore
-it further in Section~\ref{sec:whyitworks}. Its statistical validity relies on
-property \ref{it:indp} -- which is simple to formally prove for many combinations
-of filter criteria with test statistics -- and property \ref{it:joint}, which is
-harder to establish theoretically from first principles, but rarely a problem in practice.
-We refer to \cite{Bourgon:2010:PNAS} for further discussion of this topic.
-
-A simple filtering criterion readily available in the results object is the
-mean of normalized counts irrespective of biological condition (Figure \ref{figure/indFilt-1}),
-and so this is the criterion which is used automatically by the
-\Rfunction{results} function to perform independent filtering.
-Genes with very low counts are unlikely to show significant
-differences, typically because of their high dispersion.
-For example, we can plot the $-\log_{10}$ $p$ values from all genes
-over the normalized mean counts.
-
-<<indFilt, fig.show="asis", fig.small=TRUE, fig.cap="Mean counts as a filter statistic.  The mean of normalized counts provides an independent statistic for filtering the tests. It is independent because the information about the variables in the design formula is not used. By filtering out genes which fall on the left side of the plot, the majority of the low $p$ values are kept.\\label{figure/indFilt-1}">>=
-plot(res$baseMean+1, -log10(res$pvalue),
-     log="x", xlab="mean of normalized counts",
-     ylab=expression(-log[10](pvalue)),
-     ylim=c(0,30),
-     cex=.4, col=rgb(0,0,0,.3))
-@
-
-%--------------------------------------------------
-\subsubsection{Why does it work?}\label{sec:whyitworks}
-%--------------------------------------------------
-
-Consider the $p$ value histogram in Figure \ref{figure/fighistindepfilt-1}.
-It shows how the filtering ameliorates the multiple testing problem
--- and thus the severity of a multiple testing adjustment -- by
-removing a background set of hypotheses whose $p$ values are distributed
-more or less uniformly in $[0,1]$.
-
-<<histindepfilt, dev="pdf", fig.width=7, fig.height=5>>=
-use <- res$baseMean > metadata(res)$filterThreshold
-h1 <- hist(res$pvalue[!use], breaks=0:50/50, plot=FALSE)
-h2 <- hist(res$pvalue[use], breaks=0:50/50, plot=FALSE)
-colori <- c(`do not pass`="khaki", `pass`="powderblue")
-@ 
-
-<<fighistindepfilt, fig.show="asis", fig.small=TRUE, fig.cap="Histogram of p values for all tests.  The area shaded in blue indicates the subset of those that pass the filtering, the area in khaki those that do not pass.\\label{figure/fighistindepfilt-1}">>=
-barplot(height = rbind(h1$counts, h2$counts), beside = FALSE,
-        col = colori, space = 0, main = "", ylab="frequency")
-text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)),
-     adj = c(0.5,1.7), xpd=NA)
-legend("topright", fill=rev(colori), legend=rev(names(colori)))
-@
-
-\section{Frequently asked questions} \label{sec:faq}
-
-\subsection{How can I get support for DESeq2?}
-
-We welcome questions about our software, and want to
-ensure that we eliminate issues if and when they appear. We have a few
-requests to optimize the process:
-
-\begin{itemize}
-\item all questions should take place on the Bioconductor support
-  site: \url{https://support.bioconductor.org}, which serves as a
-  repository of questions and answers. This helps to save the
-  developers' time in responding to similar questions. Make sure to
-  tag your post with ``deseq2''. In addition, it is often very helpful
-  to describe the aim of your experiment.
-\item before posting, first search the Bioconductor support site
-  mentioned above for past threads which might have answered your
-  question.
-\item if you have a question about the behavior of a function, read
-  the sections of the manual page for this function by typing a
-  question mark and the function name, e.g. \Robject{?results}.  We
-  spend a lot of time documenting individual functions and the exact
-  steps that the software is performing.
-\item include all of your R code, especially the creation of the
-  \Rclass{DESeqDataSet} and the design formula.  Include complete
-  warning or error messages, and conclude your message with the full
-  output of \Robject{sessionInfo()}.
-\item if possible, include the output of
-  \Robject{as.data.frame(colData(dds))}, so that we can have a sense
-  of the experimental setup. If this contains confidential
-  information, you can replace the levels of those factors using
-  \Rfunction{levels()}.
-\end{itemize}
-
-\subsection{Why are some $p$ values set to \texttt{NA}?}
-  
-See the details in Section~\ref{sec:moreInfo}.  
-
-\subsection{How can I get unfiltered DESeq results?}
-
-Users can obtain unfiltered GLM results, i.e. without outlier removal
-or independent filtering, with the following call:
-
-<<vanillaDESeq, eval=FALSE>>=
-dds <- DESeq(dds, minReplicatesForReplace=Inf)
-res <- results(dds, cooksCutoff=FALSE, independentFiltering=FALSE)
-@
-
-In this case, the only $p$ values set to \Robject{NA} are those from
-genes with all counts equal to zero.
-
-\subsection{How do I use the variance stabilized or rlog 
-  transformed data for differential testing?}
-  
-  The variance stabilizing and rlog transformations are provided for
-  applications other than differential testing, for example clustering
-  of samples or other machine learning applications. For differential
-  testing we recommend the \Rfunction{DESeq} function applied to raw
-  counts as outlined in Section~\ref{sec:de}.
-      
-  
-\subsection{Can I use DESeq2 to analyze paired samples?}
-
-Yes, you should use a multi-factor design which includes the sample
-information as a term in the design formula. This will account for 
-differences between the samples while estimating the effect due to 
-the condition. The condition of interest should go at the end of the 
-design formula. See Section~\ref{sec:multifactor}.
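-
-For example, assuming a hypothetical \Robject{subject} column in the
-column data that identifies the paired samples, a sketch of such a
-design is:
-
-<<pairedSketch, eval=FALSE>>=
-design(dds) <- ~ subject + condition  # condition last: effect of interest
-dds <- DESeq(dds)
-@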
-
-\subsection{If I have multiple groups, should I run all together or split into pairs of groups?}
-
-Typically, we recommend that users run samples from all groups together, and then
-use the \Rcode{contrast} argument of the \Rfunction{results} function
-to extract comparisons of interest after fitting the model using \Rfunction{DESeq}.
-
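-For example, with a hypothetical three-level factor \Robject{condition}
-with levels A, B and C, all pairwise comparisons can be extracted from
-a single fit:
-
-<<multiGroupSketch, eval=FALSE>>=
-dds <- DESeq(dds)
-resBvsA <- results(dds, contrast=c("condition","B","A"))
-resCvsA <- results(dds, contrast=c("condition","C","A"))
-resCvsB <- results(dds, contrast=c("condition","C","B"))
-@
-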
-The model fit by \Rfunction{DESeq} estimates a single dispersion
-parameter for each gene, which defines how far we expect the observed
-count for a sample to be from the mean value predicted by the model,
-given its size factor and its condition group. See Section~\ref{sec:glm}
-and the \deseqtwo{} paper for full details.
-Having a single dispersion parameter for each gene is usually
-sufficient for analyzing multi-group data, as the final dispersion value will
-incorporate the within-group variability across all groups. 
-
-However, for some datasets, exploratory data analysis (EDA) plots as outlined
-in Section~\ref{sec:pca} could reveal that one or more groups has much
-higher within-group variability than the others. A simulated example
-of such a set of samples is shown in Figure~\ref{figure/varGroup-1}.
-This is a case where comparing groups A and B separately --
-subsetting the \Rclass{DESeqDataSet} to only the samples from those two
-groups and then running \Rfunction{DESeq} on this subset, as sketched
-after the figure below -- will be more sensitive than fitting a model
-that includes all samples together.
-It should be noted that such an extreme range of within-group
-variability is not common, although it could arise if certain
-treatments produce an extreme reaction (e.g. cell death).
-Again, this can easily be detected from EDA plots such as the PCA
-plot described in this vignette.
-
-<<varGroup, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Extreme range of within-group variability.  Typically, it is recommended to run \\Rfunction{DESeq} across samples from all groups, for datasets with multiple groups. However, this simulated dataset shows a case where it would be preferable to compare groups A and B by creating a smaller dataset without the C samples. Group C has much higher within-group variability, which would inflate the per-ge [...]
-set.seed(3)
-dds1 <- makeExampleDESeqDataSet(n=1000,m=12,betaSD=.3,dispMeanRel=function(x) 0.01)
-dds2 <- makeExampleDESeqDataSet(n=1000,m=12,
-                                betaSD=.3,
-                                interceptMean=mcols(dds1)$trueIntercept,
-                                interceptSD=0,
-                                dispMeanRel=function(x) 0.2)
-dds2 <- dds2[,7:12]
-dds2$condition <- rep("C",6)
-mcols(dds2) <- NULL
-dds <- cbind(dds1, dds2)
-rld <- rlog(dds, blind=FALSE, fitType="mean")
-plotPCA(rld)
-@ 
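-
-The subsetting mentioned above could be sketched as follows (assuming
-the combined \Robject{dds} with groups A, B and C):
-
-<<subsetSketch, eval=FALSE>>=
-ddsAB <- dds[, dds$condition %in% c("A","B")]
-ddsAB$condition <- droplevels(ddsAB$condition)  # drop the unused level C
-ddsAB <- DESeq(ddsAB)
-@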
-
-\subsection{Can I run DESeq2 to contrast the levels of 100 groups?}
-
-\deseqtwo{} will work with any kind of design specified using an R
-formula. We encourage users to consider exploratory data analysis such
-as principal components analysis, as described in Section~\ref{sec:pca},
-rather than performing statistical testing of all combinations of
-dozens of groups.
-
-Regarding the speed of fitting very large models,
-note that each additional level of a factor in the
-design formula adds another parameter to the GLM which is fit by
-\deseqtwo{}. Users might consider first removing genes with very few
-reads, e.g.\ genes with a row sum of 1, as this will speed up the
-fitting procedure.
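-
-A minimal sketch of such pre-filtering:
-
-<<preFilterSketch, eval=FALSE>>=
-## keep only genes with more than a single read across all samples
-dds <- dds[rowSums(counts(dds)) > 1, ]
-@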
-
-\subsection{Can I use DESeq2 to analyze a dataset without replicates?}
-
-If a \Rclass{DESeqDataSet} is provided with an experimental design without replicates,
-a warning is printed that the samples will be treated as replicates
-for estimation of dispersion. This kind of analysis is
-only useful for exploring the data, but cannot provide
-proper statistical inference on differences between groups.
-Without biological replicates, it is not possible to estimate the biological
-variability of each gene.
-More details can be found in the manual page for \Rfunction{DESeq}.
-
-\subsection{How can I include a continuous covariate in the design formula?}
-
-Continuous covariates can be included in the design formula in the
-same manner as factorial covariates. Continuous covariates might make
-sense in certain experiments, where a constant fold change might be
-expected for each unit of the covariate.  However, in many cases, more
-meaningful results can be obtained by cutting continuous covariates
-into a factor defined over a small number of bins (e.g. 3-5).  In this
-way, the average effect of each group is controlled for, regardless of
-the trend over the continuous covariates.  In R, \Rclass{numeric}
-vectors can be converted into \Rclass{factors} using the function
-\Rfunction{cut}.
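-
-For example, assuming a hypothetical continuous covariate
-\Robject{age} in the column data, a sketch of such binning is:
-
-<<cutSketch, eval=FALSE>>=
-dds$ageBin <- cut(dds$age, breaks=3)  # three bins over the range of age
-design(dds) <- ~ ageBin + condition
-@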
-
-\subsection{Will the log fold change shrinkage ``overshrink'' large differences?}
-
-For most datasets, the application of a prior to the log fold changes
-is a good choice, providing log fold change estimates that are
-more stable across the entire range of mean counts than the maximum
-likelihood estimates (see Figure~\ref{fig:MA} and the \deseqtwo{} paper).
-One situation in which the prior on log fold changes might
-``overshrink'' the estimates is
-when nearly all genes show no difference across condition, a very
-small set of genes have extremely large differences, and there are no genes in between.
-A simulated example of such a dataset is Figure~\ref{figure/overShrink-1}.
-This is not likely to be the case for most experiments, where typically
-there is a range of differences by size: some genes with medium-to-large
-differences across treatment, and some with small differences.
-
-<<overShrink, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Example of a dataset where the log fold change prior should be turned off.  Here we show a simulated MA-plot, where nearly all of the log fold changes are falling near the x-axis, with three genes that have very large log fold changes (note the y-axis is from -10 to 10 on the log2 scale). This would indicate a dataset where the log fold change prior would ``overshrink'' the large fold chan [...]
-plot(c(10^rnorm(1000, 3, 2),300,2000,5000), 
-     c(rnorm(1000, 0, .15), -5.5, -8.5, 7.5),
-     ylim=c(-10,10), log="x", cex=.4,
-     xlab="mean of normalized counts", 
-     ylab="log2 fold change")
-abline(h=0, col=rgb(1,0,0,.7))
-@ 
-
-There could be experiments in which only a few genes have
-very large log fold changes, and the rest of the genes are
-nearly constant across treatment.
-Or, there could be artificially constructed libraries fitting this description,
-e.g. technical replicates where the only difference across libraries 
-is the concentration of a few spiked-in genes.
-``Overshrinking'' of a few large log fold changes
-can be assessed by running \Rfunction{results} with \Rcode{addMLE=TRUE},
-which will return a results table with columns for both the shrunken and
-unshrunken (MLE) log fold changes.
-The two estimates can be visually compared by running \Rfunction{plotMA} with
-\Rcode{MLE=TRUE} and \Rcode{MLE=FALSE}.
-If ``overshrinking'' very large log fold changes is a concern,
-it is better to turn off the log fold change prior by
-running \Rfunction{DESeq} with \Rcode{betaPrior=FALSE}, as in the sketch below.
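-
-Putting these checks together (a sketch using the arguments named
-above):
-
-<<overshrinkSketch, eval=FALSE>>=
-res <- results(dds, addMLE=TRUE)    # shrunken and MLE log fold changes
-plotMA(res, MLE=TRUE)               # unshrunken estimates
-plotMA(res, MLE=FALSE)              # shrunken estimates
-dds <- DESeq(dds, betaPrior=FALSE)  # refit without the prior
-@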
-
-Even more detail: how do we avoid overshrinking on typical datasets?
-The answer is that we estimate the width of the log fold change prior in a
-robust way to accommodate the very largest log fold changes, and so
-avoid overshrinking.
-The details of the prior estimation are described in the manual page for
-\Rfunction{nbinomWaldTest}. Briefly, a weighted upper quantile
-is used to match the width of the log fold change prior to the upper
-5\% of the MLE log fold changes, weighting by the expected sampling
-variability of the estimated log fold changes given the mean count for
-each gene. Note that this is not equivalent to assuming that 5\% of genes are
-differentially expressed, but rather that a reasonable width for the log fold
-change distribution can be obtained from the upper 5\% of MLE log fold
-changes.
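-
-A rough sketch of this weighted-quantile matching, with hypothetical
-vectors \Robject{mleLFC} and \Robject{varLFC} standing in for the MLE
-log fold changes and their expected sampling variances:
-
-<<priorWidthSketch, eval=FALSE>>=
-library("Hmisc")
-w <- 1 / varLFC                     # weights: inverse expected variance
-priorSD <- wtd.quantile(abs(mleLFC), weights=w,
-                        probs=0.95, normwt=TRUE)
-@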
-
-\subsection{I ran a likelihood ratio test, but \texttt{results()} only gives me one comparison.}
-
-``\dots How do I get the $p$ values for all of the variables/levels 
-that were removed in the reduced design?''
-
-This is explained in the help page for \texttt{?results} in the
-section about likelihood ratio test p-values, but we will restate the
-answer here. When one performs a likelihood ratio test, the $p$ values and
-the test statistic (the \Robject{stat} column) are values for the test
-that removes all of the variables which are present in the full
-design and not in the reduced design. This tests the null hypothesis
-that all the coefficients from these variables and levels of these factors
-are equal to zero.
-
-The likelihood ratio test $p$ values therefore
-represent a test of \textit{all the variables and all the levels of factors}
-which are among these variables. However, the results table only has space for
-one column of log fold change, so a single variable and a single
-comparison are shown (among the potentially multiple log fold changes
-which were tested in the likelihood ratio test).
-This is indicated at the top of the results table
-with the text, e.g.: ``log2 fold change (MLE): condition C vs A'' followed
-by ``LRT p-value: '\lowtilde{} batch + condition' vs '\lowtilde{} batch' ''.
-This indicates that the $p$ value is for the likelihood ratio test of
-\textit{all the variables and all the levels}, while the log fold change is a single
-comparison from among those variables and levels.
-See the help page for \Rfunction{results} for more details.
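-
-For example, with hypothetical \Robject{batch} and \Robject{condition}
-variables, a likelihood ratio test comparing a full to a reduced
-design could be run as:
-
-<<lrtSketch, eval=FALSE>>=
-dds <- DESeq(dds, test="LRT", full=~batch + condition, reduced=~batch)
-res <- results(dds)  # p values test all levels of condition at once
-@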
-
-\subsection{What are the exact steps performed by \Rfunction{DESeq()}?}
-
-See the manual page for \Rfunction{DESeq}, which links to the 
-subfunctions which are called in order, where complete details are listed.
-
-\subsection{Is there an official Galaxy tool for DESeq2?}
-
-Yes. The repository for the \deseqtwo{} tool is
-\url{https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2} 
-and a link to its location in the Tool Shed is 
-\url{https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab}.
-
-\subsection{I want to benchmark DESeq2 comparing to other DE tools.}
-
-One aspect which can cause problems for comparison is that, by default,
-\deseqtwo{} outputs \Rcode{NA} values for adjusted $p$ values based on 
-independent filtering of genes which have low counts.
-This is a way for \deseqtwo{} to give extra
-information on why the adjusted $p$ value for such a gene is not small.
-Additionally, $p$ values can be set to \Rcode{NA} based on extreme 
-count outlier detection (see Section~\ref{sec:moreInfo} for full details). 
-These \Rcode{NA} values should be considered
-negatives for purposes of estimating sensitivity and specificity. The
-easiest way to work with the adjusted $p$ values in a benchmarking
-context is probably to convert these \Rcode{NA} values to 1:
-
-<<convertNA, eval=FALSE>>=
-res$padj <- ifelse(is.na(res$padj), 1, res$padj)
-@ 
-
-\section{Acknowledgments}
-
-We have benefited in the development of \deseqtwo{} from the help and
-feedback of many individuals, including but not limited to: 
-The Bioconductor Core Team,
-Alejandro Reyes, Andrzej Ole\'s, Aleksandra Pekowska, Felix Klein,
-Nikolaos Ignatiadis,
-Vince Carey,
-Owen Solberg,
-Ruping Sun,
-Devon Ryan, 
-Steve Lianoglou, Jessica Larson, Christina Chaivorapol, Pan Du, Richard Bourgon,
-Willem Talloen, 
-Elin Videvall, Hanneke van Deutekom,
-Todd Burwell, 
-Jesse Rowley,
-Igor Dolgalev,
-Stephen Turner,
-Ryan C Thompson,
-Tyr Wiesner-Hanks,
-Konrad Rudolph,
-David Robinson,
-Mingxiang Teng,
-Mathias Lesche,
-Sonali Arora,
-Jordan Ramilowski,
-Ian Dworkin,
-Bj\"orn Gr\"uning,
-Ryan McMinds,
-Paul Gordon,
-Leonardo Collado Torres,
-Enrico Ferrero.
-
-\section{Session Info}
-
-<<sessInfo, results="asis", echo=FALSE>>=
-toLatex(sessionInfo())
-@
-
-<<resetOptions, results="hide", echo=FALSE>>=
-options(prompt="> ", continue="+ ")
-@ 
-
-\bibliography{library}
-
-\end{document}
diff --git a/inst/doc/DESeq2.html b/inst/doc/DESeq2.html
new file mode 100644
index 0000000..5bf07da
--- /dev/null
+++ b/inst/doc/DESeq2.html
@@ -0,0 +1,1756 @@
+<!DOCTYPE html>
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+
+<meta charset="utf-8" />
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="pandoc" />
+
+
+<meta name="author" content="Michael I. Love, Simon Anders, and Wolfgang Huber" />
+
+
+<title>Analyzing RNA-seq data with DESeq2</title>
+
+<script src="data:application/x-javascript;base64,LyohIGpRdWVyeSB2MS4xMS4zIHwgKGMpIDIwMDUsIDIwMTUgalF1ZXJ5IEZvdW5kYXRpb24sIEluYy4gfCBqcXVlcnkub3JnL2xpY2Vuc2UgKi8KIWZ1bmN0aW9uKGEsYil7Im9iamVjdCI9PXR5cGVvZiBtb2R1bGUmJiJvYmplY3QiPT10eXBlb2YgbW9kdWxlLmV4cG9ydHM/bW9kdWxlLmV4cG9ydHM9YS5kb2N1bWVudD9iKGEsITApOmZ1bmN0aW9uKGEpe2lmKCFhLmRvY3VtZW50KXRocm93IG5ldyBFcnJvcigialF1ZXJ5IHJlcXVpcmVzIGEgd2luZG93IHdpdGggYSBkb2N1bWVudCIpO3JldHVybiBiKGEpfTpiKGEpfSgidW5kZWZpbmVkIiE9dHlwZW9mIHdpbmRvdz93aW5kb3c6dG [...]
+<meta name="viewport" content="width=device-width, initial-scale=1" />
+<link href="data:text/css;charset=utf-8,html%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsize%2Dadjust%3A100%25%7Dbody%7Bmargin%3A0%7Darticle%2Caside%2Cdetails%2Cfigcaption%2Cfigure%2Cfooter%2Cheader%2Chgroup%2Cmain%2Cmenu%2Cnav%2Csection%2Csummary%7Bdisplay%3Ablock%7Daudio%2Ccanvas%2Cprogress%2Cvideo%7Bdisplay%3Ainline%2Dblock%3Bvertical%2Dalign%3Abaseline%7Daudio%3Anot%28%5Bcontrols%5D%29%7Bdisplay%3Anone%3Bheight%3A0%7D%5Bhidden%5D%2Ctem [...]
+<script src="data:application/x-javascript;base64,LyohCiAqIEJvb3RzdHJhcCB2My4zLjUgKGh0dHA6Ly9nZXRib290c3RyYXAuY29tKQogKiBDb3B5cmlnaHQgMjAxMS0yMDE1IFR3aXR0ZXIsIEluYy4KICogTGljZW5zZWQgdW5kZXIgdGhlIE1JVCBsaWNlbnNlCiAqLwppZigidW5kZWZpbmVkIj09dHlwZW9mIGpRdWVyeSl0aHJvdyBuZXcgRXJyb3IoIkJvb3RzdHJhcCdzIEphdmFTY3JpcHQgcmVxdWlyZXMgalF1ZXJ5Iik7K2Z1bmN0aW9uKGEpeyJ1c2Ugc3RyaWN0Ijt2YXIgYj1hLmZuLmpxdWVyeS5zcGxpdCgiICIpWzBdLnNwbGl0KCIuIik7aWYoYlswXTwyJiZiWzFdPDl8fDE9PWJbMF0mJjk9PWJbMV0mJmJbMl08MSl0aHJvdy [...]
+<script src="data:application/x-javascript;base64,LyoqCiogQHByZXNlcnZlIEhUTUw1IFNoaXYgMy43LjIgfCBAYWZhcmthcyBAamRhbHRvbiBAam9uX25lYWwgQHJlbSB8IE1JVC9HUEwyIExpY2Vuc2VkCiovCi8vIE9ubHkgcnVuIHRoaXMgY29kZSBpbiBJRSA4CmlmICghIXdpbmRvdy5uYXZpZ2F0b3IudXNlckFnZW50Lm1hdGNoKCJNU0lFIDgiKSkgewohZnVuY3Rpb24oYSxiKXtmdW5jdGlvbiBjKGEsYil7dmFyIGM9YS5jcmVhdGVFbGVtZW50KCJwIiksZD1hLmdldEVsZW1lbnRzQnlUYWdOYW1lKCJoZWFkIilbMF18fGEuZG9jdW1lbnRFbGVtZW50O3JldHVybiBjLmlubmVySFRNTD0ieDxzdHlsZT4iK2IrIjwvc3R5bGU+IixkLm [...]
+<script src="data:application/x-javascript;base64,LyohIFJlc3BvbmQuanMgdjEuNC4yOiBtaW4vbWF4LXdpZHRoIG1lZGlhIHF1ZXJ5IHBvbHlmaWxsICogQ29weXJpZ2h0IDIwMTMgU2NvdHQgSmVobAogKiBMaWNlbnNlZCB1bmRlciBodHRwczovL2dpdGh1Yi5jb20vc2NvdHRqZWhsL1Jlc3BvbmQvYmxvYi9tYXN0ZXIvTElDRU5TRS1NSVQKICogICovCgovLyBPbmx5IHJ1biB0aGlzIGNvZGUgaW4gSUUgOAppZiAoISF3aW5kb3cubmF2aWdhdG9yLnVzZXJBZ2VudC5tYXRjaCgiTVNJRSA4IikpIHsKIWZ1bmN0aW9uKGEpeyJ1c2Ugc3RyaWN0IjthLm1hdGNoTWVkaWE9YS5tYXRjaE1lZGlhfHxmdW5jdGlvbihhKXt2YXIgYixjPWEuZG [...]
+<script src="data:application/x-javascript;base64,CgovKioKICogalF1ZXJ5IFBsdWdpbjogU3RpY2t5IFRhYnMKICoKICogQGF1dGhvciBBaWRhbiBMaXN0ZXIgPGFpZGFuQHBocC5uZXQ+CiAqIGFkYXB0ZWQgYnkgUnViZW4gQXJzbGFuIHRvIGFjdGl2YXRlIHBhcmVudCB0YWJzIHRvbwogKiBodHRwOi8vd3d3LmFpZGFubGlzdGVyLmNvbS8yMDE0LzAzL3BlcnNpc3RpbmctdGhlLXRhYi1zdGF0ZS1pbi1ib290c3RyYXAvCiAqLwooZnVuY3Rpb24oJCkgewogICJ1c2Ugc3RyaWN0IjsKICAkLmZuLnJtYXJrZG93blN0aWNreVRhYnMgPSBmdW5jdGlvbigpIHsKICAgIHZhciBjb250ZXh0ID0gdGhpczsKICAgIC8vIFNob3cgdGhlIHRhYi [...]
+
+
+<style type="text/css">code{white-space: pre;}</style>
+<style type="text/css">
+div.sourceCode { overflow-x: auto; }
+table.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode {
+  margin: 0; padding: 0; vertical-align: baseline; border: none; }
+table.sourceCode { width: 100%; line-height: 100%; }
+td.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; }
+td.sourceCode { padding-left: 5px; }
+code > span.kw { color: #007020; font-weight: bold; } /* Keyword */
+code > span.dt { color: #902000; } /* DataType */
+code > span.dv { color: #40a070; } /* DecVal */
+code > span.bn { color: #40a070; } /* BaseN */
+code > span.fl { color: #40a070; } /* Float */
+code > span.ch { color: #4070a0; } /* Char */
+code > span.st { color: #4070a0; } /* String */
+code > span.co { color: #60a0b0; font-style: italic; } /* Comment */
+code > span.ot { color: #007020; } /* Other */
+code > span.al { color: #ff0000; font-weight: bold; } /* Alert */
+code > span.fu { color: #06287e; } /* Function */
+code > span.er { color: #ff0000; font-weight: bold; } /* Error */
+code > span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
+code > span.cn { color: #880000; } /* Constant */
+code > span.sc { color: #4070a0; } /* SpecialChar */
+code > span.vs { color: #4070a0; } /* VerbatimString */
+code > span.ss { color: #bb6688; } /* SpecialString */
+code > span.im { } /* Import */
+code > span.va { color: #19177c; } /* Variable */
+code > span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
+code > span.op { color: #666666; } /* Operator */
+code > span.bu { } /* BuiltIn */
+code > span.ex { } /* Extension */
+code > span.pp { color: #bc7a00; } /* Preprocessor */
+code > span.at { color: #7d9029; } /* Attribute */
+code > span.do { color: #ba2121; font-style: italic; } /* Documentation */
+code > span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
+code > span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
+code > span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
+</style>
+<style type="text/css">
+  pre:not([class]) {
+    background-color: white;
+  }
+</style>
+
+<style type="text/css">
+  p.abstract{
+    text-align: center;
+    font-weight: bold;
+  }
+  div.abstract{
+    margin: auto;
+    width: 90%;
+  }
+</style>
+
+<style type="text/css">
+h1 {
+  font-size: 34px;
+}
+h1.title {
+  font-size: 38px;
+}
+h2 {
+  font-size: 30px;
+}
+h3 {
+  font-size: 24px;
+}
+h4 {
+  font-size: 18px;
+}
+h5 {
+  font-size: 16px;
+}
+h6 {
+  font-size: 12px;
+}
+.table th:not([align]) {
+  text-align: left;
+}
+</style>
+
+
+</head>
+
+<body>
+
+<style type="text/css">
+.main-container {
+  max-width: 940px;
+  margin-left: auto;
+  margin-right: auto;
+}
+code {
+  color: inherit;
+  background-color: rgba(0, 0, 0, 0.04);
+}
+img {
+  max-width:100%;
+  height: auto;
+}
+.tabbed-pane {
+  padding-top: 12px;
+}
+button.code-folding-btn:focus {
+  outline: none;
+}
+</style>
+
+
+
+<div class="container-fluid main-container">
+
+<!-- tabsets -->
+<script>
+$(document).ready(function () {
+  window.buildTabsets("TOC");
+});
+</script>
+
+<!-- code folding -->
+
+
+
+
+
+
+<div class="fluid-row" id="header">
+
+
+
+<h1 class="title toc-ignore">Analyzing RNA-seq data with DESeq2</h1>
+<h4 class="author"><em>Michael I. Love, Simon Anders, and Wolfgang Huber</em></h4>
+<h4 class="date"><em>5 May 2017</em></h4>
+<div class="abstract">
+<p class="abstract">Abstract</p>
+<p>A basic task in the analysis of count data from RNA-seq is the detection of differentially expressed genes. The count data are presented as a table which reports, for each sample, the number of sequence fragments that have been assigned to each gene. Analogous data also arise for other assay types, including comparative ChIP-Seq, HiC, shRNA screening, mass spectrometry. An important analysis question is the quantification and statistical inference of systematic changes between conditi [...]
+</div>
+
+</div>
+
+<div id="TOC">
+<ul>
+<li><a href="#standard-workflow">Standard workflow</a><ul>
+<li><a href="#quick-start">Quick start</a></li>
+<li><a href="#how-to-get-help-for-deseq2">How to get help for DESeq2</a></li>
+<li><a href="#input-data">Input data</a><ul>
+<li><a href="#why-un-normalized-counts">Why un-normalized counts?</a></li>
+<li><a href="#the-deseqdataset">The DESeqDataSet</a></li>
+<li><a href="#transcript-abundance-files-and-tximport-input">Transcript abundance files and <em>tximport</em> input</a></li>
+<li><a href="#count-matrix-input">Count matrix input</a></li>
+<li><a href="#htseq-count-input"><em>htseq-count</em> input</a></li>
+<li><a href="#summarizedexperiment-input"><em>SummarizedExperiment</em> input</a></li>
+<li><a href="#pre-filtering">Pre-filtering</a></li>
+<li><a href="#note-on-factor-levels">Note on factor levels</a></li>
+<li><a href="#collapsing-technical-replicates">Collapsing technical replicates</a></li>
+<li><a href="#about-the-pasilla-dataset">About the pasilla dataset</a></li>
+</ul></li>
+<li><a href="#differential-expression-analysis">Differential expression analysis</a></li>
+<li><a href="#exploring-and-exporting-results">Exploring and exporting results</a><ul>
+<li><a href="#ma-plot">MA-plot</a></li>
+<li><a href="#plot-counts">Plot counts</a></li>
+<li><a href="#more-information-on-results-columns">More information on results columns</a></li>
+<li><a href="#rich-visualization-and-reporting-of-results">Rich visualization and reporting of results</a></li>
+<li><a href="#exporting-results-to-csv-files">Exporting results to CSV files</a></li>
+</ul></li>
+<li><a href="#multi-factor-designs">Multi-factor designs</a></li>
+</ul></li>
+<li><a href="#data-transformations-and-visualization">Data transformations and visualization</a><ul>
+<li><a href="#count-data-transformations">Count data transformations</a><ul>
+<li><a href="#blind-dispersion-estimation">Blind dispersion estimation</a></li>
+<li><a href="#extracting-transformed-values">Extracting transformed values</a></li>
+<li><a href="#regularized-log-transformation">Regularized log transformation</a></li>
+<li><a href="#variance-stabilizing-transformation">Variance stabilizing transformation</a></li>
+<li><a href="#effects-of-transformations-on-the-variance">Effects of transformations on the variance</a></li>
+</ul></li>
+<li><a href="#data-quality-assessment-by-sample-clustering-and-visualization">Data quality assessment by sample clustering and visualization</a><ul>
+<li><a href="#heatmap-of-the-count-matrix">Heatmap of the count matrix</a></li>
+<li><a href="#heatmap-of-the-sample-to-sample-distances">Heatmap of the sample-to-sample distances</a></li>
+<li><a href="#principal-component-plot-of-the-samples">Principal component plot of the samples</a></li>
+</ul></li>
+</ul></li>
+<li><a href="#variations-to-the-standard-workflow">Variations to the standard workflow</a><ul>
+<li><a href="#wald-test-individual-steps">Wald test individual steps</a></li>
+<li><a href="#contrasts">Contrasts</a></li>
+<li><a href="#interactions">Interactions</a></li>
+<li><a href="#time-series-experiments">Time-series experiments</a></li>
+<li><a href="#likelihood-ratio-test">Likelihood ratio test</a></li>
+<li><a href="#approach-to-count-outliers">Approach to count outliers</a></li>
+<li><a href="#dispersion-plot-and-fitting-alternatives">Dispersion plot and fitting alternatives</a><ul>
+<li><a href="#local-or-mean-dispersion-fit">Local or mean dispersion fit</a></li>
+<li><a href="#supply-a-custom-dispersion-fit">Supply a custom dispersion fit</a></li>
+</ul></li>
+<li><a href="#independent-filtering-of-results">Independent filtering of results</a></li>
+<li><a href="#tests-of-log2-fold-change-above-or-below-a-threshold">Tests of log2 fold change above or below a threshold</a></li>
+<li><a href="#access-to-all-calculated-values">Access to all calculated values</a></li>
+<li><a href="#sample-gene-dependent-normalization-factors">Sample-/gene-dependent normalization factors</a></li>
+<li><a href="#model-matrix-not-full-rank">“Model matrix not full rank”</a><ul>
+<li><a href="#linear-combinations">Linear combinations</a></li>
+<li><a href="#group-specific-condition-effects-individuals-nested-within-groups">Group-specific condition effects, individuals nested within groups</a></li>
+<li><a href="#levels-without-samples">Levels without samples</a></li>
+</ul></li>
+</ul></li>
+<li><a href="#theory-behind-deseq2">Theory behind DESeq2</a><ul>
+<li><a href="#the-deseq2-model">The DESeq2 model</a></li>
+<li><a href="#changes-compared-to-deseq">Changes compared to DESeq</a></li>
+<li><a href="#methods-changes-since-the-2014-deseq2-paper">Methods changes since the 2014 DESeq2 paper</a></li>
+<li><a href="#count-outlier-detection">Count outlier detection</a></li>
+<li><a href="#contrasts-1">Contrasts</a></li>
+<li><a href="#expanded-model-matrices">Expanded model matrices</a></li>
+<li><a href="#independent-filtering-and-multiple-testing">Independent filtering and multiple testing</a><ul>
+<li><a href="#filtering-criteria">Filtering criteria</a></li>
+<li><a href="#why-does-it-work">Why does it work?</a></li>
+</ul></li>
+</ul></li>
+<li><a href="#frequently-asked-questions">Frequently asked questions</a><ul>
+<li><a href="#how-can-i-get-support-for-deseq2">How can I get support for DESeq2?</a></li>
+<li><a href="#why-are-some-p-values-set-to-na">Why are some <em>p</em> values set to NA?</a></li>
+<li><a href="#how-can-i-get-unfiltered-deseq2-results">How can I get unfiltered DESeq2 results?</a></li>
+<li><a href="#how-do-i-use-vst-or-rlog-data-for-differential-testing">How do I use VST or rlog data for differential testing?</a></li>
+<li><a href="#can-i-use-deseq2-to-analyze-paired-samples">Can I use DESeq2 to analyze paired samples?</a></li>
+<li><a href="#if-i-have-multiple-groups-should-i-run-all-together-or-split-into-pairs-of-groups">If I have multiple groups, should I run all together or split into pairs of groups?</a></li>
+<li><a href="#can-i-run-deseq2-to-contrast-the-levels-of-many-groups">Can I run DESeq2 to contrast the levels of many groups?</a></li>
+<li><a href="#can-i-use-deseq2-to-analyze-a-dataset-without-replicates">Can I use DESeq2 to analyze a dataset without replicates?</a></li>
+<li><a href="#how-can-i-include-a-continuous-covariate-in-the-design-formula">How can I include a continuous covariate in the design formula?</a></li>
+<li><a href="#i-ran-a-likelihood-ratio-test-but-results-only-gives-me-one-comparison.">I ran a likelihood ratio test, but results() only gives me one comparison.</a></li>
+<li><a href="#what-are-the-exact-steps-performed-by-deseq">What are the exact steps performed by DESeq()?</a></li>
+<li><a href="#is-there-an-official-galaxy-tool-for-deseq2">Is there an official Galaxy tool for DESeq2?</a></li>
+<li><a href="#i-want-to-benchmark-deseq2-comparing-to-other-de-tools.">I want to benchmark DESeq2 comparing to other DE tools.</a></li>
+<li><a href="#i-have-trouble-installing-deseq2-on-ubuntulinux">I have trouble installing DESeq2 on Ubuntu/Linux…</a></li>
+</ul></li>
+<li><a href="#acknowledgments">Acknowledgments</a></li>
+<li><a href="#session-info">Session info</a></li>
+<li><a href="#references">References</a></li>
+</ul>
+</div>
+
+<!-- This is the source document -->
+<div id="standard-workflow" class="section level1">
+<h1>Standard workflow</h1>
+<p><strong>If you use DESeq2 in published research, please cite:</strong></p>
+<blockquote>
+<p>Love, M.I., Huber, W., Anders, S., Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2, <em>Genome Biology</em> 2014, <strong>15</strong>:550. <a href="http://dx.doi.org/10.1186/s13059-014-0550-8">10.1186/s13059-014-0550-8</a></p>
+</blockquote>
+<p>Other Bioconductor packages with similar aims are <a href="http://bioconductor.org/packages/edgeR">edgeR</a>, <a href="http://bioconductor.org/packages/limma">limma</a>, <a href="http://bioconductor.org/packages/DSS">DSS</a>, <a href="http://bioconductor.org/packages/EBSeq">EBSeq</a>, and <a href="http://bioconductor.org/packages/baySeq">baySeq</a>.</p>
+<div id="quick-start" class="section level2">
+<h2>Quick start</h2>
+<p>Here we show the most basic steps for a differential expression analysis. There are a variety of steps upstream of DESeq2 that result in the generation of counts or estimated counts for each sample, which we will discuss in the sections below. This code chunk assumes that you have a count matrix called <code>cts</code> and a table of sample information called <code>coldata</code>. The <code>design</code> indicates how to model the samples, here, that we want to measure the effect of t [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">DESeqDataSetFromMatrix</span>(<span class="dt">countData =</span> cts,
+                              <span class="dt">colData =</span> coldata,
+                              <span class="dt">design=</span> ~<span class="st"> </span>batch +<span class="st"> </span>condition)
+dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds)
+res <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">contrast=</span><span class="kw">c</span>(<span class="st">"condition"</span>,<span class="st">"treated"</span>,<span class="st">"control"</span>))</code></pre></div>
+<p>The following starting functions will be explained below:</p>
+<ul>
+<li>If you have transcript quantification files, as produced by <em>Salmon</em>, <em>Sailfish</em>, or <em>kallisto</em>, you would use <em>DESeqDataSetFromTximport</em>.</li>
+<li>If you have <em>htseq-count</em> files, the first line would use <em>DESeqDataSetFromHTSeq</em>.</li>
+<li>If you have a <em>RangedSummarizedExperiment</em>, the first line would use <em>DESeqDataSet</em>.</li>
+</ul>
+</div>
+<div id="how-to-get-help-for-deseq2" class="section level2">
+<h2>How to get help for DESeq2</h2>
+<p>Any and all DESeq2 questions should be posted to the <strong>Bioconductor support site</strong>, which serves as a searchable knowledge base of questions and answers:</p>
+<p><a href="https://support.bioconductor.org" class="uri">https://support.bioconductor.org</a></p>
+<p>Posting a question and tagging with “DESeq2” will automatically send an alert to the package authors to respond on the support site. See the first question in the list of <a href="#FAQ">Frequently Asked Questions</a> (FAQ) for information about how to construct an informative post.</p>
+<p>You should <strong>not</strong> email your question to the package authors, as we will just reply that the question should be posted to the <strong>Bioconductor support site</strong>.</p>
+</div>
+<div id="input-data" class="section level2">
+<h2>Input data</h2>
+<div id="why-un-normalized-counts" class="section level3">
+<h3>Why un-normalized counts?</h3>
+<p>As input, the DESeq2 package expects count data as obtained, e.g., from RNA-seq or another high-throughput sequencing experiment, in the form of a matrix of integer values. The value in the <em>i</em>-th row and the <em>j</em>-th column of the matrix tells how many reads can be assigned to gene <em>i</em> in sample <em>j</em>. Analogously, for other types of assays, the rows of the matrix might correspond e.g. to binding regions (with ChIP-Seq) or peptide sequences (with quantitative  [...]
+<p>The values in the matrix should be un-normalized counts or estimated counts of sequencing reads (for single-end RNA-seq) or fragments (for paired-end RNA-seq). The <a href="http://www.bioconductor.org/help/workflows/rnaseqGene/">RNA-seq workflow</a> describes multiple techniques for preparing such count matrices. It is important to provide count matrices as input for DESeq2’s statistical model <span class="citation">(Love, Huber, and Anders 2014)</span> to hold, as only the count valu [...]
+</div>
+<div id="the-deseqdataset" class="section level3">
+<h3>The DESeqDataSet</h3>
+<p>The object class used by the DESeq2 package to store the read counts and the intermediate estimated quantities during statistical analysis is the <em>DESeqDataSet</em>, which will usually be represented in the code here as an object <code>dds</code>.</p>
+<p>A technical detail is that the <em>DESeqDataSet</em> class extends the <em>RangedSummarizedExperiment</em> class of the <a href="http://bioconductor.org/packages/SummarizedExperiment">SummarizedExperiment</a> package. The “Ranged” part refers to the fact that the rows of the assay data (here, the counts) can be associated with genomic ranges (the exons of genes). This association facilitates downstream exploration of results, making use of other Bioconductor packages’ range-based func [...]
+<p>A <em>DESeqDataSet</em> object must have an associated <em>design formula</em>. The design formula expresses the variables which will be used in modeling. The formula should be a tilde (~) followed by the variables with plus signs between them (it will be coerced into a <em>formula</em> if it is not already). The design can be changed later; however, all differential analysis steps should then be repeated, as the design formula is used to estimate the dispersions and to estimate the l [...]
+<p><em>Note</em>: In order to benefit from the default settings of the package, you should put the variable of interest at the end of the formula and make sure the control level is the first level.</p>
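+<p>As a minimal sketch (assuming a hypothetical <code>condition</code> column whose control level is named <code>untreated</code>), the reference level can be set explicitly before running the analysis:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># set the control level as the reference (first) level
+dds$condition <- relevel(dds$condition, ref="untreated")</code></pre></div>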
+<p>We will now show 4 ways of constructing a <em>DESeqDataSet</em>, depending on what pipeline was used upstream of DESeq2 to generate counts or estimated counts:</p>
+<ol style="list-style-type: decimal">
+<li>From <a href="#tximport">transcript abundance files and tximport</a></li>
+<li>From a <a href="#countmat">count matrix</a></li>
+<li>From <a href="#htseq">htseq-count files</a></li>
+<li>From a <a href="#se">SummarizedExperiment</a> object</li>
+</ol>
+<p><a name="tximport"></a></p>
+</div>
+<div id="transcript-abundance-files-and-tximport-input" class="section level3">
+<h3>Transcript abundance files and <em>tximport</em> input</h3>
+<p>A newer and recommended pipeline is to use fast transcript abundance quantifiers upstream of DESeq2, and then to create gene-level count matrices for use with DESeq2 by importing the quantification data using the <a href="http://bioconductor.org/packages/tximport">tximport</a> package. This workflow allows users to import transcript abundance estimates from a variety of external software, including the following methods:</p>
+<ul>
+<li><a href="http://combine-lab.github.io/salmon/">Salmon</a> <span class="citation">(Patro et al. 2016)</span></li>
+<li><a href="http://www.cs.cmu.edu/~ckingsf/software/sailfish/">Sailfish</a> <span class="citation">(Patro, Mount, and Kingsford 2014)</span></li>
+<li><a href="https://pachterlab.github.io/kallisto/about.html">kallisto</a> <span class="citation">(Bray et al. 2016)</span></li>
+<li><a href="http://deweylab.github.io/RSEM/">RSEM</a> <span class="citation">(Li and Dewey 2011)</span></li>
+</ul>
+<p>Some advantages of using the above methods for transcript abundance estimation are: (i) this approach corrects for potential changes in gene length across samples (e.g. from differential isoform usage) <span class="citation">(Trapnell et al. 2013)</span>, (ii) some of these methods (<em>Salmon</em>, <em>Sailfish</em>, <em>kallisto</em>) are substantially faster and require less memory and disk usage compared to alignment-based methods that require creation and storage of BAM files, an [...]
+<p>Full details on the motivation and methods for importing transcript level abundance and count estimates, summarizing to gene-level count matrices and producing an offset which corrects for potential changes in average transcript length across samples are described in <span class="citation">(Soneson, Love, and Robinson 2015)</span>. Note that the tximport-to-DESeq2 approach uses <em>estimated</em> gene counts from the transcript abundance quantifiers, but not <em>normalized</em> counts.</p>
+<p>Here, we demonstrate how to import transcript abundances and construct a gene-level <em>DESeqDataSet</em> object from <em>Salmon</em> <code>quant.sf</code> files, which are stored in the <a href="http://bioconductor.org/packages/tximportData">tximportData</a> package. You do not need the <code>tximportData</code> package for your analysis; it is only used here for demonstration.</p>
+<p>Note that, instead of locating <code>dir</code> using <em>system.file</em>, a user would typically just provide a path, e.g. <code>/path/to/quant/files</code>. For a typical use, the <code>condition</code> information should already be present as a column of the sample table <code>samples</code>, while here we construct artificial condition labels for demonstration.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"tximport"</span>)
+<span class="kw">library</span>(<span class="st">"readr"</span>)
+<span class="kw">library</span>(<span class="st">"tximportData"</span>)
+dir <-<span class="st"> </span><span class="kw">system.file</span>(<span class="st">"extdata"</span>, <span class="dt">package=</span><span class="st">"tximportData"</span>)
+samples <-<span class="st"> </span><span class="kw">read.table</span>(<span class="kw">file.path</span>(dir,<span class="st">"samples.txt"</span>), <span class="dt">header=</span><span class="ot">TRUE</span>)
+samples$condition <-<span class="st"> </span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="kw">c</span>(<span class="st">"A"</span>,<span class="st">"B"</span>),<span class="dt">each=</span><span class="dv">3</span>))
+<span class="kw">rownames</span>(samples) <-<span class="st"> </span>samples$run
+samples[,<span class="kw">c</span>(<span class="st">"pop"</span>,<span class="st">"center"</span>,<span class="st">"run"</span>,<span class="st">"condition"</span>)]</code></pre></div>
+<pre><code>##           pop center       run condition
+## ERR188297 TSI  UNIGE ERR188297         A
+## ERR188088 TSI  UNIGE ERR188088         A
+## ERR188329 TSI  UNIGE ERR188329         A
+## ERR188288 TSI  UNIGE ERR188288         B
+## ERR188021 TSI  UNIGE ERR188021         B
+## ERR188356 TSI  UNIGE ERR188356         B</code></pre>
+<p>Next we specify the path to the files using the appropriate columns of <code>samples</code>, and we read in a table that links transcripts to genes for this dataset.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">files <-<span class="st"> </span><span class="kw">file.path</span>(dir,<span class="st">"salmon"</span>, samples$run, <span class="st">"quant.sf"</span>)
+<span class="kw">names</span>(files) <-<span class="st"> </span>samples$run
+tx2gene <-<span class="st"> </span><span class="kw">read.csv</span>(<span class="kw">file.path</span>(dir, <span class="st">"tx2gene.csv"</span>))</code></pre></div>
+<p>We import the necessary quantification data for DESeq2 using the <em>tximport</em> function. For further details on use of <em>tximport</em>, including the construction of the <code>tx2gene</code> table for linking transcripts to genes in your dataset, please refer to the <a href="http://bioconductor.org/packages/tximport">tximport</a> package vignette.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">txi <-<span class="st"> </span><span class="kw">tximport</span>(files, <span class="dt">type=</span><span class="st">"salmon"</span>, <span class="dt">tx2gene=</span>tx2gene)</code></pre></div>
+<p>Finally, we can construct a <em>DESeqDataSet</em> from the <code>txi</code> object and sample information in <code>samples</code>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"DESeq2"</span>)
+ddsTxi <-<span class="st"> </span><span class="kw">DESeqDataSetFromTximport</span>(txi,
+                                   <span class="dt">colData =</span> samples,
+                                   <span class="dt">design =</span> ~<span class="st"> </span>condition)</code></pre></div>
+<p>The <code>ddsTxi</code> object here can then be used as <code>dds</code> in the following analysis steps.</p>
+<p><a name="countmat"></a></p>
+</div>
+<div id="count-matrix-input" class="section level3">
+<h3>Count matrix input</h3>
+<p>Alternatively, the function <em>DESeqDataSetFromMatrix</em> can be used if you already have a matrix of read counts prepared from another source. Another method for quickly producing count matrices from alignment files is the <em>featureCounts</em> function <span class="citation">(Liao, Smyth, and Shi 2013)</span> in the <a href="http://bioconductor.org/packages/Rsubread">Rsubread</a> package. To use <em>DESeqDataSetFromMatrix</em>, the user should provide the counts matrix, the infor [...]
+<p>To demonstrate the use of <em>DESeqDataSetFromMatrix</em>, we will read in count data from the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> package. We read in a count matrix, which we will name <code>cts</code>, and the sample information table, which we will name <code>coldata</code>. Further below we describe how to extract these objects from, e.g. <em>featureCounts</em> output.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"pasilla"</span>)
+pasCts <-<span class="st"> </span><span class="kw">system.file</span>(<span class="st">"extdata"</span>,
+                      <span class="st">"pasilla_gene_counts.tsv"</span>,
+                      <span class="dt">package=</span><span class="st">"pasilla"</span>, <span class="dt">mustWork=</span><span class="ot">TRUE</span>)
+pasAnno <-<span class="st"> </span><span class="kw">system.file</span>(<span class="st">"extdata"</span>,
+                       <span class="st">"pasilla_sample_annotation.csv"</span>,
+                       <span class="dt">package=</span><span class="st">"pasilla"</span>, <span class="dt">mustWork=</span><span class="ot">TRUE</span>)
+cts <-<span class="st"> </span><span class="kw">as.matrix</span>(<span class="kw">read.csv</span>(pasCts,<span class="dt">sep=</span><span class="st">"</span><span class="ch">\t</span><span class="st">"</span>,<span class="dt">row.names=</span><span class="st">"gene_id"</span>))
+coldata <-<span class="st"> </span><span class="kw">read.csv</span>(pasAnno, <span class="dt">row.names=</span><span class="dv">1</span>)
+coldata <-<span class="st"> </span>coldata[,<span class="kw">c</span>(<span class="st">"condition"</span>,<span class="st">"type"</span>)]</code></pre></div>
+<p>We examine the count matrix and column data to see if they are consistent:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(cts)</code></pre></div>
+<pre><code>##             untreated1 untreated2 untreated3 untreated4 treated1 treated2
+## FBgn0000003          0          0          0          0        0        0
+## FBgn0000008         92        161         76         70      140       88
+## FBgn0000014          5          1          0          0        4        0
+## FBgn0000015          0          2          1          2        1        0
+## FBgn0000017       4664       8714       3564       3150     6205     3072
+## FBgn0000018        583        761        245        310      722      299
+##             treated3
+## FBgn0000003        1
+## FBgn0000008       70
+## FBgn0000014        0
+## FBgn0000015        0
+## FBgn0000017     3334
+## FBgn0000018      308</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(coldata)</code></pre></div>
+<pre><code>##              condition        type
+## treated1fb     treated single-read
+## treated2fb     treated  paired-end
+## treated3fb     treated  paired-end
+## untreated1fb untreated single-read
+## untreated2fb untreated single-read
+## untreated3fb untreated  paired-end</code></pre>
+<p>Note that these are not in the same order with respect to samples!</p>
+<p>It is critical that the columns of the count matrix and the rows of the column data (information about samples) are in the same order. We should re-arrange one or the other so that they are consistent in terms of sample order (if we do not, later functions would produce an error). We additionally need to chop off the <code>"fb"</code> of the row names of <code>coldata</code>, so the naming is consistent.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">rownames</span>(coldata) <-<span class="st"> </span><span class="kw">sub</span>(<span class="st">"fb"</span>,<span class="st">""</span>,<span class="kw">rownames</span>(coldata))
+<span class="kw">all</span>(<span class="kw">rownames</span>(coldata) %in%<span class="st"> </span><span class="kw">colnames</span>(cts))</code></pre></div>
+<pre><code>## [1] TRUE</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">cts <-<span class="st"> </span>cts[, <span class="kw">rownames</span>(coldata)]
+<span class="kw">all</span>(<span class="kw">rownames</span>(coldata) ==<span class="st"> </span><span class="kw">colnames</span>(cts))</code></pre></div>
+<pre><code>## [1] TRUE</code></pre>
+<p>If you have used the <em>featureCounts</em> function <span class="citation">(Liao, Smyth, and Shi 2013)</span> in the <a href="http://bioconductor.org/packages/Rsubread">Rsubread</a> package, the matrix of read counts can be directly provided from the <code>"counts"</code> element in the list output. The count matrix and column data can typically be read into R from flat files using base R functions such as <em>read.csv</em> or <em>read.delim</em>. For <em>htseq-count</em> f [...]
+<p>With the count matrix, <code>cts</code>, and the sample information, <code>coldata</code>, we can construct a <em>DESeqDataSet</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"DESeq2"</span>)
+dds <-<span class="st"> </span><span class="kw">DESeqDataSetFromMatrix</span>(<span class="dt">countData =</span> cts,
+                              <span class="dt">colData =</span> coldata,
+                              <span class="dt">design =</span> ~<span class="st"> </span>condition)
+dds</code></pre></div>
+<pre><code>## class: DESeqDataSet 
+## dim: 14599 7 
+## metadata(1): version
+## assays(1): counts
+## rownames(14599): FBgn0000003 FBgn0000008 ... FBgn0261574
+##   FBgn0261575
+## rowData names(0):
+## colnames(7): treated1 treated2 ... untreated3 untreated4
+## colData names(2): condition type</code></pre>
+<p>If you have additional feature data, it can be added to the <em>DESeqDataSet</em> by adding to the metadata columns of a newly constructed object. (Here we add redundant data just for demonstration, as the gene names are already the rownames of the <code>dds</code>.)</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">featureData <-<span class="st"> </span><span class="kw">data.frame</span>(<span class="dt">gene=</span><span class="kw">rownames</span>(cts))
+<span class="kw">mcols</span>(dds) <-<span class="st"> </span><span class="kw">DataFrame</span>(<span class="kw">mcols</span>(dds), featureData)
+<span class="kw">mcols</span>(dds)</code></pre></div>
+<pre><code>## DataFrame with 14599 rows and 1 column
+##              gene
+##          <factor>
+## 1     FBgn0000003
+## 2     FBgn0000008
+## 3     FBgn0000014
+## 4     FBgn0000015
+## 5     FBgn0000017
+## ...           ...
+## 14595 FBgn0261571
+## 14596 FBgn0261572
+## 14597 FBgn0261573
+## 14598 FBgn0261574
+## 14599 FBgn0261575</code></pre>
+<p><a name="htseq"></a></p>
+</div>
+<div id="htseq-count-input" class="section level3">
+<h3><em>htseq-count</em> input</h3>
+<p>You can use the function <em>DESeqDataSetFromHTSeqCount</em> if you have used <em>htseq-count</em> from the <a href="http://www-huber.embl.de/users/anders/HTSeq">HTSeq</a> python package <span class="citation">(Anders, Pyl, and Huber 2014)</span>. For an example of using the python scripts, see the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> data package. First you will want to specify a variable which points to the directory in which the <em>htseq-count</em> output [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">directory <-<span class="st"> "/path/to/your/files/"</span></code></pre></div>
+<p>However, for demonstration purposes only, the following line of code points to the directory of the demo <em>htseq-count</em> output files packaged with the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> package.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">directory <-<span class="st"> </span><span class="kw">system.file</span>(<span class="st">"extdata"</span>, <span class="dt">package=</span><span class="st">"pasilla"</span>,
+                         <span class="dt">mustWork=</span><span class="ot">TRUE</span>)</code></pre></div>
+<p>We specify which files to read in using <em>list.files</em>, and select those files which contain the string <code>"treated"</code> using <em>grep</em>. The <em>sub</em> function is used to chop up the sample filename to obtain the condition status, or you might alternatively read in a phenotypic table using <em>read.table</em>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">sampleFiles <-<span class="st"> </span><span class="kw">grep</span>(<span class="st">"treated"</span>,<span class="kw">list.files</span>(directory),<span class="dt">value=</span><span class="ot">TRUE</span>)
+sampleCondition <-<span class="st"> </span><span class="kw">sub</span>(<span class="st">"(.*treated).*"</span>,<span class="st">"</span><span class="ch">\\</span><span class="st">1"</span>,sampleFiles)
+sampleTable <-<span class="st"> </span><span class="kw">data.frame</span>(<span class="dt">sampleName =</span> sampleFiles,
+                          <span class="dt">fileName =</span> sampleFiles,
+                          <span class="dt">condition =</span> sampleCondition)</code></pre></div>
+<p>Then we build the <em>DESeqDataSet</em> using the following function:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"DESeq2"</span>)
+ddsHTSeq <-<span class="st"> </span><span class="kw">DESeqDataSetFromHTSeqCount</span>(<span class="dt">sampleTable =</span> sampleTable,
+                                       <span class="dt">directory =</span> directory,
+                                       <span class="dt">design=</span> ~<span class="st"> </span>condition)
+ddsHTSeq</code></pre></div>
+<pre><code>## class: DESeqDataSet 
+## dim: 70463 7 
+## metadata(1): version
+## assays(1): counts
+## rownames(70463): FBgn0000003:001 FBgn0000008:001 ...
+##   FBgn0261575:001 FBgn0261575:002
+## rowData names(0):
+## colnames(7): treated1fb.txt treated2fb.txt ... untreated3fb.txt
+##   untreated4fb.txt
+## colData names(1): condition</code></pre>
+<p><a name="se"></a></p>
+</div>
+<div id="summarizedexperiment-input" class="section level3">
+<h3><em>SummarizedExperiment</em> input</h3>
+<p>An example of the steps to produce a <em>RangedSummarizedExperiment</em> can be found in the <a href="http://www.bioconductor.org/help/workflows/rnaseqGene/">RNA-seq workflow</a> and in the vignette for the data package <a href="http://bioconductor.org/packages/airway">airway</a>. Here we load the <em>RangedSummarizedExperiment</em> from that package in order to build a <em>DESeqDataSet</em>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"airway"</span>)
+<span class="kw">data</span>(<span class="st">"airway"</span>)
+se <-<span class="st"> </span>airway</code></pre></div>
+<p>The constructor function below shows the generation of a <em>DESeqDataSet</em> from a <em>RangedSummarizedExperiment</em> <code>se</code>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"DESeq2"</span>)
+ddsSE <-<span class="st"> </span><span class="kw">DESeqDataSet</span>(se, <span class="dt">design =</span> ~<span class="st"> </span>cell +<span class="st"> </span>dex)
+ddsSE</code></pre></div>
+<pre><code>## class: DESeqDataSet 
+## dim: 64102 8 
+## metadata(2): '' version
+## assays(1): counts
+## rownames(64102): ENSG00000000003 ENSG00000000005 ... LRG_98 LRG_99
+## rowData names(0):
+## colnames(8): SRR1039508 SRR1039509 ... SRR1039520 SRR1039521
+## colData names(9): SampleName cell ... Sample BioSample</code></pre>
+</div>
+<div id="pre-filtering" class="section level3">
+<h3>Pre-filtering</h3>
+<p>While it is not necessary to pre-filter low count genes before running the DESeq2 functions, there are two reasons which make pre-filtering useful: by removing rows in which there are no reads or nearly no reads, we reduce the memory size of the <code>dds</code> data object and we increase the speed of the transformation and testing functions within DESeq2. Here we perform a minimal pre-filtering to remove rows that have only 0 or 1 read. Note that more strict filtering to increase po [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span>dds[ <span class="kw">rowSums</span>(<span class="kw">counts</span>(dds)) ><span class="st"> </span><span class="dv">1</span>, ]</code></pre></div>
+</div>
+<div id="note-on-factor-levels" class="section level3">
+<h3>Note on factor levels</h3>
+<p>By default, R will choose a <em>reference level</em> for factors based on alphabetical order. Then, if you never tell the DESeq2 functions which level you want to compare against (e.g. which level represents the control group), the comparisons will be based on the alphabetical order of the levels. There are two solutions: you can either explicitly tell <em>results</em> which comparison to make using the <code>contrast</code> argument (this will be shown later), or you can explicitly s [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds$condition <-<span class="st"> </span><span class="kw">factor</span>(dds$condition, <span class="dt">levels=</span><span class="kw">c</span>(<span class="st">"untreated"</span>,<span class="st">"treated"</span>))</code></pre></div>
+<p>…or using <em>relevel</em>, just specifying the reference level:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds$condition <-<span class="st"> </span><span class="kw">relevel</span>(dds$condition, <span class="dt">ref=</span><span class="st">"untreated"</span>)</code></pre></div>
+<p>If you need to subset the columns of a <em>DESeqDataSet</em>, i.e., when removing certain samples from the analysis, it is possible that all the samples for one or more levels of a variable in the design formula would be removed. In this case, the <em>droplevels</em> function can be used to remove those levels which do not have samples in the current <em>DESeqDataSet</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds$condition <-<span class="st"> </span><span class="kw">droplevels</span>(dds$condition)</code></pre></div>
+</div>
+<div id="collapsing-technical-replicates" class="section level3">
+<h3>Collapsing technical replicates</h3>
+<p>DESeq2 provides a function <em>collapseReplicates</em> which can assist in combining the counts from technical replicates into single columns of the count matrix. The term <em>technical replicate</em> implies multiple sequencing runs of the same library. You should not collapse biological replicates using this function. See the manual page for an example of the use of <em>collapseReplicates</em>.</p>
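+<p>A minimal sketch of a call to <em>collapseReplicates</em>, assuming hypothetical columns <code>sample</code> (identifying the biological sample) and <code>run</code> (identifying the sequencing run) in the column data:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># 'sample' and 'run' are assumed columns of colData(dds)
+ddsColl <- collapseReplicates(dds, groupby = dds$sample, run = dds$run)
+# the runs summed into each collapsed column are recorded in the column data
+colData(ddsColl)$runsCollapsed</code></pre></div>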
+</div>
+<div id="about-the-pasilla-dataset" class="section level3">
+<h3>About the pasilla dataset</h3>
+<p>We continue with the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> data constructed from the count matrix method above. This data set is from an experiment on <em>Drosophila melanogaster</em> cell cultures and investigated the effect of RNAi knock-down of the splicing factor <em>pasilla</em> <span class="citation">(Brooks et al. 2011)</span>. The detailed transcript of the production of the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> data is provide [...]
+<p><a name="de"></a></p>
+</div>
+</div>
+<div id="differential-expression-analysis" class="section level2">
+<h2>Differential expression analysis</h2>
+<p>The standard differential expression analysis steps are wrapped into a single function, <em>DESeq</em>. The estimation steps performed by this function are described <a href="#theory">below</a>, in the manual page for <code>?DESeq</code> and in the Methods section of the DESeq2 publication <span class="citation">(Love, Huber, and Anders 2014)</span>.</p>
+<p>Results tables are generated using the function <em>results</em>, which extracts a results table with log2 fold changes, <em>p</em> values and adjusted <em>p</em> values. With no additional arguments to <em>results</em>, the log2 fold change and Wald test <em>p</em> value will be for the last variable in the design formula, and if this is a factor, the comparison will be the last level of this variable over the first level. However, the order of the variables of the design do not matt [...]
+<p>Details about the comparison are printed to the console, above the results table. The text, <code>condition treated vs untreated</code>, tells you that the estimates are of the logarithmic fold change log2(treated/untreated).</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds)
+res <-<span class="st"> </span><span class="kw">results</span>(dds)
+res</code></pre></div>
+<pre><code>## log2 fold change (MLE): condition treated vs untreated 
+## Wald test p-value: condition treated vs untreated 
+## DataFrame with 11638 rows and 6 columns
+##                 baseMean log2FoldChange     lfcSE         stat     pvalue
+##                <numeric>      <numeric> <numeric>    <numeric>  <numeric>
+## FBgn0000008   95.1440790    0.002151683 0.2238867  0.009610592 0.99233197
+## FBgn0000014    1.0565722   -0.496689957 2.1597256 -0.229978272 0.81810865
+## FBgn0000015    0.8467233   -1.882756713 2.1063362 -0.893853836 0.37140010
+## FBgn0000017 4352.5928988   -0.240025055 0.1260345 -1.904439437 0.05685298
+## FBgn0000018  418.6149305   -0.104798934 0.1482908 -0.706712077 0.47974542
+## ...                  ...            ...       ...          ...        ...
+## FBgn0261570  3208.384460     0.29543213 0.1270246   2.32578599 0.02002997
+## FBgn0261572     6.197137    -0.95912781 0.7769982  -1.23440151 0.21705333
+## FBgn0261573  2240.983986     0.01261611 0.1127225   0.11192186 0.91088536
+## FBgn0261574  4857.742672     0.01525741 0.1931199   0.07900487 0.93702875
+## FBgn0261575    10.683554     0.16355063 0.9386206   0.17424573 0.86167235
+##                  padj
+##             <numeric>
+## FBgn0000008 0.9970815
+## FBgn0000014        NA
+## FBgn0000015        NA
+## FBgn0000017 0.2862230
+## FBgn0000018 0.8282460
+## ...               ...
+## FBgn0261570 0.1428209
+## FBgn0261572 0.6097343
+## FBgn0261573 0.9824950
+## FBgn0261574 0.9888664
+## FBgn0261575 0.9688434</code></pre>
+<p><a name="lfcShrink"></a></p>
+<p>In previous versions of DESeq2, the <em>DESeq</em> function by default would produce moderated, or shrunken, log2 fold changes through the use of the <code>betaPrior</code> argument. In version 1.16 and higher, we have split the moderation of log2 fold changes into a separate function, <em>lfcShrink</em>, for reasons described in the <a href="#changes">changes section</a> below.</p>
+<p>Here we provide the <code>dds</code> object and the number of the coefficient we want to moderate. It is also possible to specify a <code>contrast</code>, instead of <code>coef</code>, which works the same as the <code>contrast</code> argument of the <em>results</em> function. If a results object is provided, the <code>log2FoldChange</code> column will be swapped out; otherwise <em>lfcShrink</em> returns a vector of shrunken log2 fold changes.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">resultsNames</span>(dds)</code></pre></div>
+<pre><code>## [1] "Intercept"                      "condition_treated_vs_untreated"</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resLFC <-<span class="st"> </span><span class="kw">lfcShrink</span>(dds, <span class="dt">coef=</span><span class="dv">2</span>, <span class="dt">res=</span>res)
+resLFC</code></pre></div>
+<pre><code>## log2 fold change (MAP): condition treated vs untreated 
+## Wald test p-value: condition treated vs untreated 
+## DataFrame with 11638 rows and 5 columns
+##                 baseMean log2FoldChange         stat     pvalue      padj
+##                <numeric>      <numeric>    <numeric>  <numeric> <numeric>
+## FBgn0000008   95.1440790    0.001476959  0.009610592 0.99233197 0.9970815
+## FBgn0000014    1.0565722   -0.011952307 -0.229978272 0.81810865        NA
+## FBgn0000015    0.8467233   -0.046559241 -0.893853836 0.37140010        NA
+## FBgn0000017 4352.5928988   -0.209784559 -1.904439437 0.05685298 0.2862230
+## FBgn0000018  418.6149305   -0.087416357 -0.706712077 0.47974542 0.8282460
+## ...                  ...            ...          ...        ...       ...
+## FBgn0261570  3208.384460     0.25779079   2.32578599 0.02002997 0.1428209
+## FBgn0261572     6.197137    -0.14722257  -1.23440151 0.21705333 0.6097343
+## FBgn0261573  2240.983986     0.01131286   0.11192186 0.91088536 0.9824950
+## FBgn0261574  4857.742672     0.01140563   0.07900487 0.93702875 0.9888664
+## FBgn0261575    10.683554     0.01883364   0.17424573 0.86167235 0.9688434</code></pre>
+<p>The above steps should take less than 30 seconds for most analyses. For experiments with many samples (e.g. 100 samples), one can take advantage of parallelized computation. Both of the above functions have an argument <code>parallel</code> which, if set to <code>TRUE</code>, can be used to distribute computation across cores specified by the <em>register</em> function of <a href="http://bioconductor.org/packages/BiocParallel">BiocParallel</a>. For example, the following chunk (not eval [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"BiocParallel"</span>)
+<span class="kw">register</span>(<span class="kw">MulticoreParam</span>(<span class="dv">4</span>))</code></pre></div>
+<p>We can order our results table by the smallest adjusted <em>p</em> value:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resOrdered <-<span class="st"> </span>res[<span class="kw">order</span>(res$padj),]</code></pre></div>
+<p>We can summarize some basic tallies using the <em>summary</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">summary</span>(res)</code></pre></div>
+<pre><code>## 
+## out of 11638 with nonzero total read count
+## adjusted p-value < 0.1
+## LFC > 0 (up)     : 515, 4.4% 
+## LFC < 0 (down)   : 537, 4.6% 
+## outliers [1]     : 1, 0.0086% 
+## low counts [2]   : 3159, 27% 
+## (mean count < 6)
+## [1] see 'cooksCutoff' argument of ?results
+## [2] see 'independentFiltering' argument of ?results</code></pre>
+<p>How many adjusted p-values were less than 0.1?</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">sum</span>(res$padj <<span class="st"> </span><span class="fl">0.1</span>, <span class="dt">na.rm=</span><span class="ot">TRUE</span>)</code></pre></div>
+<pre><code>## [1] 1052</code></pre>
+<p>The <em>results</em> function contains a number of arguments to customize the results table which is generated. You can read about these arguments by looking up <code>?results</code>. Note that the <em>results</em> function automatically performs independent filtering based on the mean of normalized counts for each gene, optimizing the number of genes which will have an adjusted <em>p</em> value below a given FDR cutoff, <code>alpha</code>. Independent filtering is further discussed < [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">res05 <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">alpha=</span><span class="fl">0.05</span>)
+<span class="kw">summary</span>(res05)</code></pre></div>
+<pre><code>## 
+## out of 11638 with nonzero total read count
+## adjusted p-value < 0.05
+## LFC > 0 (up)     : 408, 3.5% 
+## LFC < 0 (down)   : 433, 3.7% 
+## outliers [1]     : 1, 0.0086% 
+## low counts [2]   : 3159, 27% 
+## (mean count < 6)
+## [1] see 'cooksCutoff' argument of ?results
+## [2] see 'independentFiltering' argument of ?results</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">sum</span>(res05$padj <<span class="st"> </span><span class="fl">0.05</span>, <span class="dt">na.rm=</span><span class="ot">TRUE</span>)</code></pre></div>
+<pre><code>## [1] 841</code></pre>
+<p><a name="IHW"></a></p>
+<p>A generalization of the idea of <em>p</em> value filtering is to <em>weight</em> hypotheses to optimize power. A Bioconductor package, <a href="http://bioconductor.org/packages/IHW">IHW</a>, is available that implements the method of <em>Independent Hypothesis Weighting</em> <span class="citation">(Ignatiadis et al. 2015)</span>. Here we show the use of <em>IHW</em> for <em>p</em> value adjustment of DESeq2 results. For more details, please see the vignette of the <a href="http://bioc [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"IHW"</span>)
+resIHW <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">filterFun=</span>ihw)
+<span class="kw">summary</span>(resIHW)</code></pre></div>
+<pre><code>## 
+## out of 11638 with nonzero total read count
+## adjusted p-value < 0.1
+## LFC > 0 (up)     : 515, 4.4% 
+## LFC < 0 (down)   : 549, 4.7% 
+## outliers [1]     : 1, 0.0086% 
+## [1] see 'cooksCutoff' argument of ?results
+## [2] see metadata(res)$ihwResult on hypothesis weighting</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">sum</span>(resIHW$padj <<span class="st"> </span><span class="fl">0.1</span>, <span class="dt">na.rm=</span><span class="ot">TRUE</span>)</code></pre></div>
+<pre><code>## [1] 1064</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">metadata</span>(resIHW)$ihwResult</code></pre></div>
+<pre><code>## ihwResult object with 11638 hypothesis tests 
+## Nominal FDR control level: 0.1 
+## Split into 7 bins, based on an ordinal covariate</code></pre>
+<p>If a multi-factor design is used, or if the variable in the design formula has more than two levels, the <code>contrast</code> argument of <em>results</em> can be used to extract different comparisons from the <em>DESeqDataSet</em> returned by <em>DESeq</em>. The use of the <code>contrast</code> argument is further discussed <a href="#contrasts">below</a>.</p>
+<p>For advanced users, note that all the values calculated by the DESeq2 package are stored in the <em>DESeqDataSet</em> object, and access to these values is discussed <a href="#access">below</a>.</p>
+</div>
+<div id="exploring-and-exporting-results" class="section level2">
+<h2>Exploring and exporting results</h2>
+<div id="ma-plot" class="section level3">
+<h3>MA-plot</h3>
+<p>In DESeq2, the function <em>plotMA</em> shows the log2 fold changes attributable to a given variable over the mean of normalized counts for all the samples in the <em>DESeqDataSet</em>. Points will be colored red if the adjusted <em>p</em> value is less than 0.1. Points which fall out of the window are plotted as open triangles pointing either up or down.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plotMA</span>(res, <span class="dt">ylim=</span><span class="kw">c</span>(-<span class="dv">2</span>,<span class="dv">2</span>))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdZ5wcxbnv8ZpZ7Upa5QACBZJkgshgY4x9bBF8wQZs0AI26QDGBvv4HqJNuJLBGAQcMscEEwR8jEiClchgQMKYaGxEElkosRJCaSWttHln7otGTdOhuqo6TM/M7/vhxWq2u7pmdpH+U/P0U7lisSgAAAAAqMmXegIAAABAOSFAAwAAABoI0AAAAIAGAjQAAACggQANAAAAaCBAAwAAABoI0AAAAIAGAjQAAACggQANAAAAaCBAAwAAABoI0AAAAIAGAjQAAACggQANAAAAaCBAAwAAABoI0AAAAIAGAjQAAACggQANAAAAaCBAAwAAABoI0AAAAIAGAjQAAACggQANAAAAaCBAAwAAABoI0AAAAIAGAjQAA [...]
+<p>It is also useful to visualize the MA-plot for the shrunken log2 fold changes, which remove the noise associated with log2 fold changes from low count genes without requiring arbitrary filtering thresholds.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plotMA</span>(resLFC, <span class="dt">ylim=</span><span class="kw">c</span>(-<span class="dv">2</span>,<span class="dv">2</span>))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzde5xVVf3/8XW4O6Ago4ACCUKiSKJQpJgJSqmBKYySKaiYaOUllK+aX9R+JpR5/VoIipkVpIlyyQtayhje0NSBCAUBB0UwEBGBAUYQ5vfHxu12X9dae+3LOef1fPh4OJzZl3X2OXP2e6/zWWsXGhoaBAAAAAA5jbJuAAAAAFBMCNAAAACAAgI0AAAAoIAADQAAACggQAMAAAAKCNAAAACAAgI0AAAAoIAADQAAACggQAMAAAAKCNAAAACAAgI0AAAAoIAADQAAACggQAMAAAAKCNAAAACAAgI0AAAAoIAADQAAACggQAMAAAAKCNAAAACAAgI0AAAAoIAADQAAACggQAMAAAAKCNAAAACAAgI0AAAAoIAAD [...]
+<p>After calling <em>plotMA</em>, one can use the function <em>identify</em> to interactively detect the row number of individual genes by clicking on the plot. One can then recover the gene identifiers by saving the resulting indices:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">idx <-<span class="st"> </span><span class="kw">identify</span>(res$baseMean, res$log2FoldChange)
+<span class="kw">rownames</span>(res)[idx]</code></pre></div>
+</div>
+<div id="plot-counts" class="section level3">
+<h3>Plot counts</h3>
+<p>It can also be useful to examine the counts of reads for a single gene across the groups. A simple function for making this plot is <em>plotCounts</em>, which normalizes counts by sequencing depth and adds a pseudocount of 1/2 to allow for log scale plotting. The counts are grouped by the variables in <code>intgroup</code>, where more than one variable can be specified. Here we specify the gene which had the smallest <em>p</em> value from the results table created above. You can selec [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plotCounts</span>(dds, <span class="dt">gene=</span><span class="kw">which.min</span>(res$padj), <span class="dt">intgroup=</span><span class="st">"condition"</span>)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAMAAADNCOCpAAADAFBMVEUAAAABAQECAgIDAwMEBAQFBQUGBgYHBwcICAgJCQkKCgoLCwsMDAwNDQ0ODg4PDw8QEBARERESEhITExMUFBQVFRUWFhYXFxcYGBgZGRkaGhobGxscHBwdHR0eHh4fHx8gICAhISEiIiIjIyMkJCQlJSUmJiYnJycoKCgpKSkqKiorKyssLCwtLS0uLi4vLy8wMDAxMTEyMjIzMzM0NDQ1NTU2NjY3Nzc4ODg5OTk6Ojo7Ozs8PDw9PT0+Pj4/Pz9AQEBBQUFCQkJDQ0NERERFRUVGRkZHR0dISEhJSUlKSkpLS0tMTExNTU1OTk5PT09QUFBRUVFSUlJTU1NUVFRVVVVWVlZXV1dYWFhZWVlaWlpbW1tcXFxdXV1eXl5fX19gYGBhYWFiYmJjY2NkZ [...]
+<p>For customized plotting, an argument <code>returnData</code> specifies that the function should only return a <em>data.frame</em> for plotting with <em>ggplot</em>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">d <-<span class="st"> </span><span class="kw">plotCounts</span>(dds, <span class="dt">gene=</span><span class="kw">which.min</span>(res$padj), <span class="dt">intgroup=</span><span class="st">"condition"</span>, 
+                <span class="dt">returnData=</span><span class="ot">TRUE</span>)
+<span class="kw">library</span>(<span class="st">"ggplot2"</span>)
+<span class="kw">ggplot</span>(d, <span class="kw">aes</span>(<span class="dt">x=</span>condition, <span class="dt">y=</span>count)) +<span class="st"> </span>
+<span class="st">  </span><span class="kw">geom_point</span>(<span class="dt">position=</span><span class="kw">position_jitter</span>(<span class="dt">w=</span><span class="fl">0.1</span>,<span class="dt">h=</span><span class="dv">0</span>)) +<span class="st"> </span>
+<span class="st">  </span><span class="kw">scale_y_log10</span>(<span class="dt">breaks=</span><span class="kw">c</span>(<span class="dv">25</span>,<span class="dv">100</span>,<span class="dv">400</span>))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAMAAADNCOCpAAAC7lBMVEUAAAABAQECAgIDAwMEBAQFBQUGBgYHBwcICAgJCQkKCgoLCwsMDAwNDQ0ODg4PDw8QEBARERESEhITExMUFBQVFRUWFhYXFxcYGBgZGRkaGhobGxscHBwdHR0eHh4fHx8gICAhISEiIiIjIyMkJCQlJSUmJiYnJycoKCgpKSkqKiorKyssLCwtLS0uLi4vLy8xMTEyMjIzMzM0NDQ1NTU2NjY3Nzc4ODg5OTk6Ojo7Ozs8PDw9PT0/Pz9AQEBBQUFCQkJDQ0NERERFRUVGRkZHR0dISEhJSUlKSkpLS0tNTU1OTk5PT09QUFBRUVFSUlJTU1NUVFRVVVVWVlZXV1dYWFhZWVlaWlpbW1tcXFxdXV1eXl5fX19gYGBhYWFiYmJjY2NkZGRlZWVmZmZnZ [...]
+</div>
+<div id="more-information-on-results-columns" class="section level3">
+<h3>More information on results columns</h3>
+<p>Information about which variables and tests were used can be found by calling the function <em>mcols</em> on the results object.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">mcols</span>(res)$description</code></pre></div>
+<pre><code>## [1] "mean of normalized counts for all samples"             
+## [2] "log2 fold change (MLE): condition treated vs untreated"
+## [3] "standard error: condition treated vs untreated"        
+## [4] "Wald statistic: condition treated vs untreated"        
+## [5] "Wald test p-value: condition treated vs untreated"     
+## [6] "BH adjusted p-values"</code></pre>
+<p>For a particular gene, a log2 fold change of -1 for <code>condition treated vs untreated</code> means that the treatment induces a multiplicative change in observed gene expression level of <span class="math inline">\(2^{-1} = 0.5\)</span> compared to the untreated condition. If the variable of interest is continuous-valued, then the reported log2 fold change is per unit of change of that variable.</p>
+<p><a name="pvaluesNA"></a></p>
+<p><strong>Note on p-values set to NA</strong>: some values in the results table can be set to <code>NA</code> for one of the following reasons:</p>
+<ul>
+<li>If within a row, all samples have zero counts, the <code>baseMean</code> column will be zero, and the log2 fold change estimates, <em>p</em> value and adjusted <em>p</em> value will all be set to <code>NA</code>.</li>
+<li>If a row contains a sample with an extreme count outlier then the <em>p</em> value and adjusted <em>p</em> value will be set to <code>NA</code>. These outlier counts are detected by Cook’s distance. Customization of this outlier filtering and description of functionality for replacement of outlier counts and refitting is described <a href="#outlier">below</a></li>
+<li>If a row is filtered by automatic independent filtering, for having a low mean normalized count, then only the adjusted <em>p</em> value will be set to <code>NA</code>. Description and customization of independent filtering is described <a href="#indfilt">below</a></li>
+</ul>
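+<p>These <code>NA</code> entries can be tallied directly from the results object; a quick sketch using the <code>res</code> object from above:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># rows where all samples had zero counts
+sum(res$baseMean == 0)
+# rows where the adjusted p value was set to NA by outlier
+# detection or independent filtering
+sum(is.na(res$padj))</code></pre></div>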
+</div>
+<div id="rich-visualization-and-reporting-of-results" class="section level3">
+<h3>Rich visualization and reporting of results</h3>
+<p><strong>ReportingTools.</strong> An HTML report of the results with plots and sortable/filterable columns can be generated using the <a href="http://bioconductor.org/packages/ReportingTools">ReportingTools</a> package on a <em>DESeqDataSet</em> that has been processed by the <em>DESeq</em> function. For a code example, see the <em>RNA-seq differential expression</em> vignette at the <a href="http://bioconductor.org/packages/ReportingTools">ReportingTools</a> page, or the manual page f [...]
+<p><strong>regionReport.</strong> An HTML and PDF summary of the results with plots can also be generated using the <a href="http://bioconductor.org/packages/regionReport">regionReport</a> package. The <em>DESeq2Report</em> function should be run on a <em>DESeqDataSet</em> that has been processed by the <em>DESeq</em> function. For more details see the manual page for <em>DESeq2Report</em> and an example vignette in the <a href="http://bioconductor.org/packages/regionReport">regionReport [...]
+<p><strong>Glimma.</strong> Interactive visualization of DESeq2 output, including MA-plots (also called MD-plot) can be generated using the <a href="http://bioconductor.org/packages/Glimma">Glimma</a> package. See the manual page for <em>glMDPlot.DESeqResults</em>.</p>
+<p><strong>pcaExplorer.</strong> Interactive visualization of DESeq2 output, including PCA plots, boxplots of counts and other useful summaries can be generated using the <a href="http://bioconductor.org/packages/pcaExplorer">pcaExplorer</a> package. See the <em>Launching the application</em> section of the package vignette.</p>
+</div>
+<div id="exporting-results-to-csv-files" class="section level3">
+<h3>Exporting results to CSV files</h3>
+<p>A plain-text file of the results can be exported using the base R functions <em>write.csv</em> or <em>write.table</em>. We suggest using a descriptive file name indicating the variable and levels which were tested.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">write.csv</span>(<span class="kw">as.data.frame</span>(resOrdered), 
+          <span class="dt">file=</span><span class="st">"condition_treated_results.csv"</span>)</code></pre></div>
+<p>Exporting only the results which pass an adjusted <em>p</em> value threshold can be accomplished with the <em>subset</em> function, followed by the <em>write.csv</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resSig <-<span class="st"> </span><span class="kw">subset</span>(resOrdered, padj <<span class="st"> </span><span class="fl">0.1</span>)
+resSig</code></pre></div>
+<pre><code>## log2 fold change (MLE): condition treated vs untreated 
+## Wald test p-value: condition treated vs untreated 
+## DataFrame with 1052 rows and 6 columns
+##              baseMean log2FoldChange      lfcSE      stat        pvalue
+##             <numeric>      <numeric>  <numeric> <numeric>     <numeric>
+## FBgn0039155  730.5958      -4.619006 0.16872512 -27.37593 5.307306e-165
+## FBgn0025111 1501.4105       2.899863 0.12693550  22.84517 1.632133e-115
+## FBgn0029167 3706.1165      -2.197001 0.09701773 -22.64535 1.550285e-113
+## FBgn0003360 4343.0354      -3.179672 0.14352683 -22.15385 9.577104e-109
+## FBgn0035085  638.2326      -2.560409 0.13731558 -18.64617  1.356647e-77
+## ...               ...            ...        ...       ...           ...
+## FBgn0004359  83.96562      0.6448247  0.2573869  2.505274    0.01223565
+## FBgn0030026 212.16680      0.5660727  0.2260159  2.504571    0.01226001
+## FBgn0038874 103.79261     -0.6831454  0.2727706 -2.504469    0.01226354
+## FBgn0053329 602.55858     -0.4998614  0.1997516 -2.502415    0.01233494
+## FBgn0031183 428.52319     -0.3472728  0.1388560 -2.500957    0.01238581
+##                      padj
+##                 <numeric>
+## FBgn0039155 4.499534e-161
+## FBgn0025111 6.918613e-112
+## FBgn0029167 4.381106e-110
+## FBgn0003360 2.029867e-105
+## FBgn0035085  2.300330e-74
+## ...                   ...
+## FBgn0004359    0.09898268
+## FBgn0030026    0.09901933
+## FBgn0038874    0.09901933
+## FBgn0053329    0.09950107
+## FBgn0031183    0.09981644</code></pre>
+</div>
+</div>
+<div id="multi-factor-designs" class="section level2">
+<h2>Multi-factor designs</h2>
+<p>Experiments with more than one factor influencing the counts can be analyzed using design formulas that include the additional variables. In fact, DESeq2 can analyze any possible experimental design that can be expressed with fixed effects terms (multiple factors, designs with interactions, designs with continuous variables, splines, and so on are all possible).</p>
+<p>By adding variables to the design, one can control for additional variation in the counts. For example, if the condition samples are balanced across experimental batches, by including the <code>batch</code> factor to the design, one can increase the sensitivity for finding differences due to <code>condition</code>. There are multiple ways to analyze experiments when the additional variables are of interest and not just controlling factors (see <a href="#interactions">section on intera [...]
+<p>The data in the <a href="http://bioconductor.org/packages/pasilla">pasilla</a> package have a condition of interest (the column <code>condition</code>), as well as information on the type of sequencing which was performed (the column <code>type</code>), as we can see below:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">colData</span>(dds)</code></pre></div>
+<pre><code>## DataFrame with 7 rows and 3 columns
+##            condition        type sizeFactor
+##             <factor>    <factor>  <numeric>
+## treated1     treated single-read  1.6355751
+## treated2     treated  paired-end  0.7612698
+## treated3     treated  paired-end  0.8326526
+## untreated1 untreated single-read  1.1382630
+## untreated2 untreated single-read  1.7930004
+## untreated3 untreated  paired-end  0.6495470
+## untreated4 untreated  paired-end  0.7516892</code></pre>
+<p>We create a copy of the <em>DESeqDataSet</em>, so that we can rerun the analysis using a multi-factor design.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">ddsMF <-<span class="st"> </span>dds</code></pre></div>
+<p>We can account for the different types of sequencing, and get a clearer picture of the differences attributable to the treatment. As <code>condition</code> is the variable of interest, we put it at the end of the formula. Thus the <em>results</em> function will by default pull the <code>condition</code> results unless <code>contrast</code> or <code>name</code> arguments are specified. Then we can re-run <em>DESeq</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">design</span>(ddsMF) <-<span class="st"> </span><span class="kw">formula</span>(~<span class="st"> </span>type +<span class="st"> </span>condition)
+ddsMF <-<span class="st"> </span><span class="kw">DESeq</span>(ddsMF)</code></pre></div>
+<p>Again, we access the results using the <em>results</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resMF <-<span class="st"> </span><span class="kw">results</span>(ddsMF)
+<span class="kw">head</span>(resMF)</code></pre></div>
+<pre><code>## log2 fold change (MLE): condition treated vs untreated 
+## Wald test p-value: condition treated vs untreated 
+## DataFrame with 6 rows and 6 columns
+##                 baseMean log2FoldChange     lfcSE        stat     pvalue
+##                <numeric>      <numeric> <numeric>   <numeric>  <numeric>
+## FBgn0000008   95.1440790    -0.04067393 0.2222916 -0.18297560 0.85481716
+## FBgn0000014    1.0565722    -0.08498351 2.1115371 -0.04024722 0.96789603
+## FBgn0000015    0.8467233    -1.86105812 2.2635706 -0.82217807 0.41097556
+## FBgn0000017 4352.5928988    -0.25612969 0.1118570 -2.28979575 0.02203316
+## FBgn0000018  418.6149305    -0.06468996 0.1317230 -0.49110616 0.62335136
+## FBgn0000024    6.4062892     0.31109845 0.7658820  0.40619635 0.68459834
+##                  padj
+##             <numeric>
+## FBgn0000008 0.9504077
+## FBgn0000014        NA
+## FBgn0000015        NA
+## FBgn0000017 0.1303866
+## FBgn0000018 0.8640563
+## FBgn0000024 0.8919545</code></pre>
+<p>It is also possible to retrieve the log2 fold changes, <em>p</em> values and adjusted <em>p</em> values of the <code>type</code> variable. The <code>contrast</code> argument of the function <em>results</em> takes a character vector of length three: the name of the variable, the name of the factor level for the numerator of the log2 ratio, and the name of the factor level for the denominator. The <code>contrast</code> argument can also take other forms, as described in the help page fo [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resMFType <-<span class="st"> </span><span class="kw">results</span>(ddsMF,
+                     <span class="dt">contrast=</span><span class="kw">c</span>(<span class="st">"type"</span>, <span class="st">"single-read"</span>, <span class="st">"paired-end"</span>))
+<span class="kw">head</span>(resMFType)</code></pre></div>
+<pre><code>## log2 fold change (MLE): type single-read vs paired-end 
+## Wald test p-value: type single.read vs paired.end 
+## DataFrame with 6 rows and 6 columns
+##                 baseMean log2FoldChange     lfcSE       stat    pvalue
+##                <numeric>      <numeric> <numeric>  <numeric> <numeric>
+## FBgn0000008   95.1440790    -0.26225891 0.2207626 -1.1879680 0.2348460
+## FBgn0000014    1.0565722     3.29057851 2.0869706  1.5767249 0.1148588
+## FBgn0000015    0.8467233    -0.58154078 2.1821934 -0.2664937 0.7898590
+## FBgn0000017 4352.5928988    -0.09976491 0.1117182 -0.8930049 0.3718545
+## FBgn0000018  418.6149305     0.22930201 0.1306356  1.7552790 0.0792116
+## FBgn0000024    6.4062892     0.30788127 0.7611816  0.4044781 0.6858612
+##                  padj
+##             <numeric>
+## FBgn0000008 0.5310094
+## FBgn0000014        NA
+## FBgn0000015        NA
+## FBgn0000017 0.6693382
+## FBgn0000018 0.2848511
+## FBgn0000024        NA</code></pre>
+<p>If the variable is continuous or an interaction term (see <a href="#interactions">section on interactions</a>) then the results can be extracted using the <code>name</code> argument to <em>results</em>, where the name is one of elements returned by <code>resultsNames(dds)</code>.</p>
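+<p>For example, the default comparison above could equivalently be extracted by name (a sketch, using one of the names returned by <em>resultsNames</em> for this design):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resultsNames(ddsMF)
+# equivalent to the default contrast for this design
+resName <- results(ddsMF, name="condition_treated_vs_untreated")</code></pre></div>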
+<p><a name="transform"></a></p>
+</div>
+</div>
+<div id="data-transformations-and-visualization" class="section level1">
+<h1>Data transformations and visualization</h1>
+<div id="count-data-transformations" class="section level2">
+<h2>Count data transformations</h2>
+<p>In order to test for differential expression, we operate on raw counts and use discrete distributions as described in the previous section on differential expression. However for other downstream analyses – e.g. for visualization or clustering – it might be useful to work with transformed versions of the count data.</p>
+<p>Perhaps the most obvious choice of transformation is the logarithm. Since count values for a gene can be zero in some conditions (and non-zero in others), some advocate the use of <em>pseudocounts</em>, i.e. transformations of the form:</p>
+<p><span class="math display">\[ y = \log_2(n + n_0) \]</span></p>
+<p>where <em>n</em> represents the count values and <span class="math inline">\(n_0\)</span> is a positive constant.</p>
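+<p>For instance, the shifted logarithm with <span class="math inline">\(n_0 = 1\)</span> can be computed directly from the normalized counts (a sketch; the <em>normTransform</em> function used below provides the same transformation wrapped in a <em>DESeqTransform</em> object):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># shifted logarithm, log2(n + 1), on counts normalized by size factors
+logcounts <- log2(counts(dds, normalized=TRUE) + 1)</code></pre></div>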
+<p>In this section, we discuss two alternative approaches that offer more theoretical justification and a rational way of choosing the parameter equivalent to <span class="math inline">\(n_0\)</span> above. The <em>regularized logarithm</em> or <em>rlog</em> incorporates a prior on the sample differences <span class="citation">(Love, Huber, and Anders 2014)</span>, and the other uses the concept of variance stabilizing transformations (VST) <span class="citation">(Tibshirani 1988; Huber  [...]
+<p>The point of these two transformations, the <em>rlog</em> and the VST, is to remove the dependence of the variance on the mean, particularly the high variance of the logarithm of count data when the mean is low. Both <em>rlog</em> and VST use the experiment-wide trend of variance over mean, in order to transform the data to remove the experiment-wide trend. Note that we do not require or desire that all the genes have <em>exactly</em> the same variance after transformation. Indeed, in [...]
+<p><strong>Note on running time:</strong> if you have many samples (e.g. 100s), the <em>rlog</em> function might take too long, and so the <em>vst</em> function will be a faster choice. The rlog and VST have similar properties, but the rlog requires fitting a shrinkage term for each sample and each gene which takes time. See the DESeq2 paper for more discussion on the differences <span class="citation">(Love, Huber, and Anders 2014)</span>.</p>
+<div id="blind-dispersion-estimation" class="section level3">
+<h3>Blind dispersion estimation</h3>
+<p>The two functions, <em>rlog</em> and <em>vst</em> have an argument <code>blind</code>, for whether the transformation should be blind to the sample information specified by the design formula. When <code>blind</code> equals <code>TRUE</code> (the default), the functions will re-estimate the dispersions using only an intercept. This setting should be used in order to compare samples in a manner wholly unbiased by the information about experimental groups, for example to perform sample  [...]
+<p>However, blind dispersion estimation is not the appropriate choice if one expects that many or the majority of genes (rows) will have large differences in counts which are explainable by the experimental design, and one wishes to transform the data for downstream analysis. In this case, using blind dispersion estimation will lead to large estimates of dispersion, as it treats differences due to the experimental design as unwanted <em>noise</em>, and will result in overly shrinking the [...]
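+<p>For fully unsupervised quality assessment one would therefore run the transformation blind to the design, for example (a sketch; <code>blind=TRUE</code> is also the default):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># re-estimates dispersions using only an intercept, ignoring the design
+rldBlind <- rlog(dds, blind=TRUE)</code></pre></div>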
+</div>
+<div id="extracting-transformed-values" class="section level3">
+<h3>Extracting transformed values</h3>
+<p>These transformation functions return an object of class <em>DESeqTransform</em> which is a subclass of <em>RangedSummarizedExperiment</em>. For ~20 samples, running on a newly created <code>DESeqDataSet</code>, <em>rlog</em> may take 30 seconds, <em>varianceStabilizingTransformation</em> may take 5 seconds, and <em>vst</em> less than 1 second (by subsetting to 1000 genes for calculating the global dispersion trend). However, the running times are shorter and more similar with <code>b [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">rld <-<span class="st"> </span><span class="kw">rlog</span>(dds, <span class="dt">blind=</span><span class="ot">FALSE</span>)
+vsd <-<span class="st"> </span><span class="kw">varianceStabilizingTransformation</span>(dds, <span class="dt">blind=</span><span class="ot">FALSE</span>)
+vsd.fast <-<span class="st"> </span><span class="kw">vst</span>(dds, <span class="dt">blind=</span><span class="ot">FALSE</span>)
+<span class="kw">head</span>(<span class="kw">assay</span>(rld), <span class="dv">3</span>)</code></pre></div>
+<pre><code>##               treated1   treated2   treated3 untreated1 untreated2
+## FBgn0000008  6.5052069  6.6702250  6.5002811  6.4775687  6.5312761
+## FBgn0000014  0.1781321  0.1489758  0.1486405  0.1997286  0.1537222
+## FBgn0000015 -0.2871970 -0.2937085 -0.2939834 -0.2948680 -0.2801519
+##             untreated3 untreated4
+## FBgn0000008  6.6743786  6.5524385
+## FBgn0000014  0.1495966  0.1490241
+## FBgn0000015 -0.2765435 -0.2635561</code></pre>
+</div>
+<div id="regularized-log-transformation" class="section level3">
+<h3>Regularized log transformation</h3>
+<p>The function <em>rlog</em>, which stands for <em>regularized log</em>, transforms the original count data to the log2 scale by fitting a model with a term for each sample and a prior distribution on the coefficients which is estimated from the data. This is the same kind of shrinkage (sometimes referred to as regularization, or moderation) of log fold changes used by <em>DESeq</em> and <em>nbinomWaldTest</em>. The resulting data contains elements defined as:</p>
+<p><span class="math display">\[ \log_2(q_{ij}) = \beta_{i0} + \beta_{ij} \]</span></p>
+<p>where <span class="math inline">\(q_{ij}\)</span> is a parameter proportional to the expected true concentration of fragments for gene <em>i</em> and sample <em>j</em> (see formula <a href="#theory">below</a>), <span class="math inline">\(\beta_{i0}\)</span> is an intercept which does not undergo shrinkage, and <span class="math inline">\(\beta_{ij}\)</span> is the sample-specific effect which is shrunk toward zero based on the dispersion-mean trend over the entire dataset. The trend  [...]
+<p>Note that, as <span class="math inline">\(q_{ij}\)</span> represents the part of the mean value <span class="math inline">\(\mu_{ij}\)</span> after the size factor <span class="math inline">\(s_j\)</span> has been divided out, it is clear that the rlog transformation inherently accounts for differences in sequencing depth. Without priors, this design matrix would lead to a non-unique solution; however, the addition of a prior on non-intercept betas allows for a unique solution to be fo [...]
+</div>
+<div id="variance-stabilizing-transformation" class="section level3">
+<h3>Variance stabilizing transformation</h3>
+<p>Above, we used a parametric fit for the dispersion. In this case, the closed-form expression for the variance stabilizing transformation is used by <em>varianceStabilizingTransformation</em>, which is derived in the file <code>vst.pdf</code> distributed in the package alongside this vignette. If a local fit is used (option <code>fitType="locfit"</code> to <em>estimateDispersions</em>), a numerical integration is used instead.</p>
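+<p>For example, a local fit could be requested before transforming (a sketch, assuming the <em>locfit</em> package is installed; <code>ddsLocal</code> is a hypothetical copy of the dataset):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># local regression fit for the dispersion trend, followed by the
+# numerically integrated variance stabilizing transformation
+ddsLocal <- estimateDispersions(dds, fitType="locfit")
+vsdLocal <- varianceStabilizingTransformation(ddsLocal, blind=FALSE)</code></pre></div>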
+</div>
+<div id="effects-of-transformations-on-the-variance" class="section level3">
+<h3>Effects of transformations on the variance</h3>
+<p>The figure below plots the standard deviation of the transformed data, across samples, against the mean, using the shifted logarithm transformation, the regularized log transformation and the variance stabilizing transformation. The shifted logarithm has elevated standard deviation in the lower count range, and the regularized log to a lesser extent, while for the variance stabilized data the standard deviation is roughly constant along the whole dynamic range.</p>
+<p>Note that the vertical axis in such plots is the square root of the variance over all samples, and thus includes the variance due to the experimental conditions. While a flat curve of the square root of variance over the mean may seem like the goal of such transformations, this may be unreasonable in the case of datasets with many true differences due to the experimental conditions.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="co"># this gives log2(n + 1)</span>
+ntd <-<span class="st"> </span><span class="kw">normTransform</span>(dds)
+<span class="kw">library</span>(<span class="st">"vsn"</span>)
+notAllZero <-<span class="st"> </span>(<span class="kw">rowSums</span>(<span class="kw">counts</span>(dds))><span class="dv">0</span>)
+<span class="kw">meanSdPlot</span>(<span class="kw">assay</span>(ntd)[notAllZero,])</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzde3xU1b3//89kZkIySUhCInBAUBFtFGzxVu81RfR4Odj0WBUvvVl6rPYcbMXe1NpW6zla/Gl/+PMGqNX+jqbSCqX4LVbaolatSisV0CBaKlpQJCRkQu7JfP/Y7e50LjtrzeyZvWbv1/PBH5M9K2t/9p6ZPe8s1swKJRIJAQAAAKCmzOsCAAAAgFJCgAYAAAA0EKABAAAADQRoAAAAQAMBGgAAANBAgAYAAAA0EKABAAAADQRoAAAAQAMBGgAAANBAgAYAAAA0EKABAAAADQRoAAAAQAMBGgAAANBAgAYAAAA0RLwuoEi6u7v7+voK1Hk4HK6vrxeRgYGBrq6uAu3FZNXV1ZFIpLOz0+tCPBCLxWKxmIjE4 [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">meanSdPlot</span>(<span class="kw">assay</span>(rld[notAllZero,]))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzde5wU1Z3//9PdM9PDwHBtlICAoJJJmCiQREWjjobNenngJSZxjBpzIRoSvmpWjcq6bKK7q4k8lJ9gVLxEzG+llcdXxOAjaEic6BqFzCoo6oBGoqKg4TIzzKVhLv39o5JKp6u6+pyu012nq1/PBw8fY3V11adOVXe958zpPpF0Oi0AAAAAyIkGXQAAAABQTgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACgjQAAAAgIKqoAsoA11dXalUqhhbjkQiY8aMEUL09/e3t7cXYxdmqq+vj0ajHR0dQRdSOsOHD [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">meanSdPlot</span>(<span class="kw">assay</span>(vsd[notAllZero,]))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdf3wU1b3H/7O7SQgQhWwCIggKKk2F8qsVQcUGyrX+eETw2kqsWvuDK6Xlq/aKtnC9trX9Vqs80C9oVbRW6KMS5XFVkD5ExZpirYq5CoIS0EoFFLQgJIRkk02y3z+mnbvdH7Pn7J7ZOTv7ej74I8zOznzm1847J2fnBGKxmAAAAAAgJ+h1AQAAAEAhIUADAAAACgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACgjQAAAAgAICNAAAAKCAAA0AAAAoIEADAAAACkq8LqAAtLW1RSIRN5YcCASqqqqEEN3d3UeOHHFjFcaqrKzs6uo6duyY14Xkj324o9FoS [...]
+</div>
+</div>
+<div id="data-quality-assessment-by-sample-clustering-and-visualization" class="section level2">
+<h2>Data quality assessment by sample clustering and visualization</h2>
+<p>Data quality assessment and quality control (i.e. the removal of insufficiently good data) are essential steps of any data analysis. These steps should typically be performed very early in the analysis of a new data set, preceding or in parallel to the differential expression testing.</p>
+<p>We define the term <em>quality</em> as <em>fitness for purpose</em>. Our purpose is the detection of differentially expressed genes, and we are looking in particular for samples whose experimental treatment suffered from an abnormality that renders the data points obtained from these particular samples detrimental to our purpose.</p>
+<div id="heatmap-of-the-count-matrix" class="section level3">
+<h3>Heatmap of the count matrix</h3>
+<p>To explore a count matrix, it is often instructive to look at it as a heatmap. Below we show how to produce such a heatmap for various transformations of the data.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"pheatmap"</span>)
+select <-<span class="st"> </span><span class="kw">order</span>(<span class="kw">rowMeans</span>(<span class="kw">counts</span>(dds,<span class="dt">normalized=</span><span class="ot">TRUE</span>)),
+                <span class="dt">decreasing=</span><span class="ot">TRUE</span>)[<span class="dv">1</span>:<span class="dv">20</span>]
+df <-<span class="st"> </span><span class="kw">as.data.frame</span>(<span class="kw">colData</span>(dds)[,<span class="kw">c</span>(<span class="st">"condition"</span>,<span class="st">"type"</span>)])
+<span class="kw">pheatmap</span>(<span class="kw">assay</span>(ntd)[select,], <span class="dt">cluster_rows=</span><span class="ot">FALSE</span>, <span class="dt">show_rownames=</span><span class="ot">FALSE</span>,
+         <span class="dt">cluster_cols=</span><span class="ot">FALSE</span>, <span class="dt">annotation_col=</span>df)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdd1hUx/oH8PdspSkoEAEFFFCaxnZFjDE2YsNYIgoEjfyM9SZeU+3tKip6Y9SIhkQSY0uMV4wFiIpoBFExwYZYKTZ6723Z8/vjeE82C8KusuyC38+TJ8/s7Jwz7+wj8DLMmWFYliUAAAAAAFCNQNsBAAAAAAC0JEigAQAAAADUgAQaAAAAAEANSKABAAAAANSABBoAAAAAQA1IoAEAAAAA1IAEGgAAAABADUigAQAAAADUgAQaAAAAAEANSKABAAAAANSABBoAAAAAQA1IoAEAAAAA1IAEGgAAAABADUigAQAAAADUIFKlUUJCwuXLl1mW1XQ0AAAAoEQkEo0ZM8bCwkLbgQDAMyrNQGdlZSF7BgAA0AqZT [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">pheatmap</span>(<span class="kw">assay</span>(rld)[select,], <span class="dt">cluster_rows=</span><span class="ot">FALSE</span>, <span class="dt">show_rownames=</span><span class="ot">FALSE</span>,
+         <span class="dt">cluster_cols=</span><span class="ot">FALSE</span>, <span class="dt">annotation_col=</span>df)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdeVxUVf8H8O+dlVVQIEEFFVA2zYU0zEwxcsM0EwVCg8fM5Vc+tiruPoqKPuWSWCSUe5mJuYCpiCaIikYuuKCC4Ma+78sw9/fH9blNA8KMMsyAn/fLXq8zZ86953smhK+Hc89hWJYlAAAAAABQjUDbAQAAAAAAtCZIoAEAAAAA1IAEGgAAAABADUigAQAAAADUgAQaAAAAAEANSKABAAAAANSABBoAAAAAQA1IoAEAAAAA1IAEGgAAAABADUigAQAAAADUgAQaAAAAAEANSKABAAAAANSABBoAAAAAQA1IoAEAAAAA1CBSpVFSUtKFCxdYltV0NAAAAKBEJBKNGTPG0tJS24EAwBMqzUBnZ2cjewYAANAKm [...]
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">pheatmap</span>(<span class="kw">assay</span>(vsd)[select,], <span class="dt">cluster_rows=</span><span class="ot">FALSE</span>, <span class="dt">show_rownames=</span><span class="ot">FALSE</span>,
+         <span class="dt">cluster_cols=</span><span class="ot">FALSE</span>, <span class="dt">annotation_col=</span>df)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdd1hUx/oH8PdspSkoEAEFFVCaxhYRY4yN2DAaIwoEjfyMNYnXVHu7ioreGPWKhiiJPTFeMRYgKqIRRMUEG6KIFBu997bs/v445mSztF1l2QW+nydPntk5c868wyPwOs6ZYWQyGQEAAAAAgHJ4mg4AAAAAAKAlQQINAAAAAKACJNAAAAAAACpAAg0AAAAAoAIk0AAAAAAAKkACDQAAAACgAiTQAAAAAAAqQAINAAAAAKACJNAAAAAAACpAAg0AAAAAoAIk0AAAAAAAKkACDQAAAACgAiTQAAAAAAAqQAINAAAAAKACgTKNYmNjr1+/LpPJ1B0NAAAAKBAIBOPHjzczM9N0IADwglIz0JmZmcieAQAANEIik [...]
+</div>
+<div id="heatmap-of-the-sample-to-sample-distances" class="section level3">
+<h3>Heatmap of the sample-to-sample distances</h3>
+<p>Another use of the transformed data is sample clustering. Here, we apply the <em>dist</em> function to the transpose of the transformed count matrix to get sample-to-sample distances. We could alternatively use the variance stabilized transformation here.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">sampleDists <-<span class="st"> </span><span class="kw">dist</span>(<span class="kw">t</span>(<span class="kw">assay</span>(rld)))</code></pre></div>
+<p>A heatmap of this distance matrix gives us an overview of the similarities and dissimilarities between samples. We have to provide a hierarchical clustering <code>hc</code> to the heatmap function based on the sample distances, or else the heatmap function would calculate a clustering based on the distances between the rows/columns of the distance matrix.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">library</span>(<span class="st">"RColorBrewer"</span>)
+sampleDistMatrix <-<span class="st"> </span><span class="kw">as.matrix</span>(sampleDists)
+<span class="kw">rownames</span>(sampleDistMatrix) <-<span class="st"> </span><span class="kw">paste</span>(rld$condition, rld$type, <span class="dt">sep=</span><span class="st">"-"</span>)
+<span class="kw">colnames</span>(sampleDistMatrix) <-<span class="st"> </span><span class="ot">NULL</span>
+colors <-<span class="st"> </span><span class="kw">colorRampPalette</span>( <span class="kw">rev</span>(<span class="kw">brewer.pal</span>(<span class="dv">9</span>, <span class="st">"Blues"</span>)) )(<span class="dv">255</span>)
+<span class="kw">pheatmap</span>(sampleDistMatrix,
+         <span class="dt">clustering_distance_rows=</span>sampleDists,
+         <span class="dt">clustering_distance_cols=</span>sampleDists,
+         <span class="dt">col=</span>colors)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdeVxW1d7//3UxCCii4gSJIwgoWSplWpYTqYkHj2ZHnJIoh44n9XTSk5pJDnSX3sf0q+ZR0gY7morzkKIhJpXzEDgkgwIyCzKpILB/f+z7vm5+F9O1LzZs8Ho9HzzOA9e13euz97Lj28Xaa+skSRIAAAAAjGOhdQEAAABAQ0KABgAAABQgQAMAAAAKEKABAAAABQjQAAAAgAJWWhcA1Mhzzz134cIFrasA6o5Op1uxYsU//vEPrQsBAPPFDDQasKKiItIzzI0kSWfOnNG6CgAwa8xAo8Fr1KhRYWGh1lUAdWHnzp1/+ctftK4CAMwdM9AAAACAAgRoAAAAQAECNAAAAKAAARoAAABQgAANAAAAKECABgAAA [...]
+</div>
+<div id="principal-component-plot-of-the-samples" class="section level3">
+<h3>Principal component plot of the samples</h3>
+<p>Related to the distance matrix is the PCA plot, which shows the samples in the 2D plane spanned by their first two principal components. This type of plot is useful for visualizing the overall effect of experimental covariates and batch effects.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plotPCA</span>(rld, <span class="dt">intgroup=</span><span class="kw">c</span>(<span class="st">"condition"</span>, <span class="st">"type"</span>))</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdd2AUZf7H8Wd2N8mmZ1PoJaGEHAhIEYUDpagUQZqXoIB3URTRiKjoIYKCntgQEKRzR5WAoVdBinSlBPjRayQJSYCQ3je7+/tj7nK5NHYym7Lh/fqLPDvPPt+ZIckns888I1ksFgEAAADAOpqqLgAAAACwJwRoAAAAQAECNAAAAKAAARoAAABQgAANAAAAKECABgAAABQgQAMAAAAKEKABAAAABQjQAAAAgAIEaAAAAEABAjQAAACgAAEaAAAAUIAADQAAAChAgAYAAAAUIEADAAAAChCgAQAAAAUI0AAAAIACBGgAAABAAQI0AAAAoAABGgAAAFCAAA0AAAAoQIAGAAAAFCBAAwAAAAoQoAEAAAAFCNAAA [...]
+<p>It is also possible to customize the PCA plot using the <em>ggplot</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">pcaData <-<span class="st"> </span><span class="kw">plotPCA</span>(rld, <span class="dt">intgroup=</span><span class="kw">c</span>(<span class="st">"condition"</span>, <span class="st">"type"</span>), <span class="dt">returnData=</span><span class="ot">TRUE</span>)
+percentVar <-<span class="st"> </span><span class="kw">round</span>(<span class="dv">100</span> *<span class="st"> </span><span class="kw">attr</span>(pcaData, <span class="st">"percentVar"</span>))
+<span class="kw">ggplot</span>(pcaData, <span class="kw">aes</span>(PC1, PC2, <span class="dt">color=</span>condition, <span class="dt">shape=</span>type)) +
+<span class="st">  </span><span class="kw">geom_point</span>(<span class="dt">size=</span><span class="dv">3</span>) +
+<span class="st">  </span><span class="kw">xlab</span>(<span class="kw">paste0</span>(<span class="st">"PC1: "</span>,percentVar[<span class="dv">1</span>],<span class="st">"% variance"</span>)) +
+<span class="st">  </span><span class="kw">ylab</span>(<span class="kw">paste0</span>(<span class="st">"PC2: "</span>,percentVar[<span class="dv">2</span>],<span class="st">"% variance"</span>)) +<span class="st"> </span>
+<span class="st">  </span><span class="kw">coord_fixed</span>()</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdeVxU1f/H8XNnYGbYV80FFTSRr6aWqZW7mSmmlZpgqV+jLLOUzB+VJi4tLuWuqaWWa6K5lmZpqakZJYr6rdyXFARcEJRhZ5jfH/f7nQcPQJrLHRhgXs8/egx37r3nc+USb86ce45kNpsFAAAAAOto7F0AAAAAUJ0QoAEAAAAFCNAAAACAAgRoAAAAQAECNAAAAKAAARoAAABQgAANAAAAKECABgAAABQgQAMAAAAKEKABAAAABQjQAAAAgAIEaAAAAEABAjQAAACgAAEaAAAAUIAADQAAAChAgAYAAAAUIEADAAAAChCgAQAAAAUI0AAAAIACBGgAAABAAQI0AAAAoAABGgAAAFCAAA0AAAAoQIAGAAAAF [...]
+</div>
+</div>
+</div>
+<div id="variations-to-the-standard-workflow" class="section level1">
+<h1>Variations to the standard workflow</h1>
+<div id="wald-test-individual-steps" class="section level2">
+<h2>Wald test individual steps</h2>
+<p>The function <em>DESeq</em> runs the following functions in order:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">estimateSizeFactors</span>(dds)
+dds <-<span class="st"> </span><span class="kw">estimateDispersions</span>(dds)
+dds <-<span class="st"> </span><span class="kw">nbinomWaldTest</span>(dds)</code></pre></div>
+<p><a name="contrasts"></a></p>
+</div>
+<div id="contrasts" class="section level2">
+<h2>Contrasts</h2>
+<p>A contrast is a linear combination of estimated log2 fold changes, which can be used to test if differences between groups are equal to zero. The simplest use case for contrasts is an experimental design containing a factor with three levels, say A, B and C. Contrasts enable the user to generate results for all 3 possible differences: log2 fold change of B vs A, of C vs A, and of C vs B. The <code>contrast</code> argument of the <em>results</em> function is used to extract test results of log2 fold changes of interest, for example:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">results</span>(dds, <span class="dt">contrast=</span><span class="kw">c</span>(<span class="st">"condition"</span>,<span class="st">"C"</span>,<span class="st">"B"</span>))</code></pre></div>
+<p>Log2 fold changes can also be added and subtracted by providing a <code>list</code> to the <code>contrast</code> argument which has two elements: the names of the log2 fold changes to add, and the names of the log2 fold changes to subtract. The names used in the list should come from <code>resultsNames(dds)</code>.</p>
+<p>Alternatively, a numeric vector of the length of <code>resultsNames(dds)</code> can be provided, for manually specifying the linear combination of terms. Demonstrations of the use of contrasts for various designs can be found in the examples section of the help page for the <em>results</em> function. The mathematical formula that is used to generate the contrasts can be found <a href="#theory">below</a>.</p>
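+<p>As an illustration only (the coefficient names here are hypothetical; check <code>resultsNames(dds)</code> for the actual names in your object), the C vs B comparison above could equivalently be extracted in either form:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># list form: subtract the B vs A effect from the C vs A effect
+results(dds, contrast=list("condition_C_vs_A", "condition_B_vs_A"))
+# numeric form: one weight per element of resultsNames(dds),
+# assumed here to be (Intercept, condition_B_vs_A, condition_C_vs_A)
+results(dds, contrast=c(0, -1, 1))</code></pre></div>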
+<p><a name="interactions"></a></p>
+</div>
+<div id="interactions" class="section level2">
+<h2>Interactions</h2>
+<p>Interaction terms can be added to the design formula, in order to test, for example, whether the log2 fold change attributable to a given condition is <em>different</em> based on another factor, e.g. whether the condition effect differs across genotype.</p>
+<p>Many users begin to add interaction terms to the design formula, when in fact a much simpler approach would give all the results tables that are desired. We will explain this approach first, because it is much simpler to perform. If the comparisons of interest are, for example, the effect of a condition for different sets of samples, a simpler approach than adding interaction terms explicitly to the design formula is to perform the following steps:</p>
+<ul>
+<li>combine the factors of interest into a single factor with all combinations of the original factors</li>
+<li>change the design to include just this factor, e.g. ~ group</li>
+</ul>
+<p>Using this design is similar to adding an interaction term, in that it models multiple condition effects which can be easily extracted with <em>results</em>. Suppose we have two factors <code>genotype</code> (with values I, II, and III) and <code>condition</code> (with values A and B), and we want to extract the condition effect specifically for each genotype. We could use the following approach to obtain, e.g. the condition effect for genotype I:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds$group <-<span class="st"> </span><span class="kw">factor</span>(<span class="kw">paste0</span>(dds$genotype, dds$condition))
+<span class="kw">design</span>(dds) <-<span class="st"> </span><span class="er">~</span><span class="st"> </span>group
+dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds)
+<span class="kw">resultsNames</span>(dds)
+<span class="kw">results</span>(dds, <span class="dt">contrast=</span><span class="kw">c</span>(<span class="st">"group"</span>, <span class="st">"IB"</span>, <span class="st">"IA"</span>))</code></pre></div>
+<p>The following two plots diagram hypothetical genotype-specific condition effects, which could be modeled with interaction terms by using a design of <code>~genotype + condition + genotype:condition</code>.</p>
+<p>In the first plot (Gene 1), note that the condition effect is consistent across genotypes. Although condition A has a different baseline for I, II, and III, the condition effect is a log2 fold change of about 2 for each genotype. Using a model with an interaction term <code>genotype:condition</code>, the interaction terms for genotype II and genotype III will be nearly 0.</p>
+<p>Here, the y-axis represents log2(n+1), and each group has 20 samples (black dots). A red line connects the mean of the groups within each genotype.</p>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdaWAT5fr38XuSJk2TAi2UFmS1rIKIgLueHgqoHEFAZVFARPTvws7hsBdkE1BENlFABAXxIAgKCkpZFFkUqOBBEBAQ2SlLS6FN0mZ7XuRxCF0zbdJJ0u/nVa4798xchDb9NZ25R3K5XAIAAACAdzRqNwAAAAAEEwI0AAAAoAABGgAAAFCAAA0AAAAoQIAGAAAAFCBAAwAAAAoQoAEAAAAFCNAAAACAAgRoAAAAQAECNAAAAKAAARoAAABQgAANAAAAKECABgAAABQIU7uBoJeRkZGSknLhwoXz589rNJro6OiGDRs2b948IiJC7dYAAADgewToYnK5XKtWrfrkk082bdpks9lyPavT6Tp27DhgwICEhARV2 [...]
+<p>In the second plot (Gene 2), we can see that the condition effect is not consistent across genotype. Here the main condition effect (the effect for the reference genotype I) is again 2. However, this time the interaction terms will be around 1 for genotype II and -4 for genotype III. This is because the condition effect is higher by 1 for genotype II compared to genotype I, and lower by 4 for genotype III compared to genotype I. The condition effect for genotype II (or III) is obtained by adding the main condition effect and the interaction term for that genotype.</p>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdd2AU1d7/8TO7yW5CAiQQQpEg3SAP7YoGUZoUhYBAgEtVBK8idlQQFRGvXq+CXrEjIiIIDwIJCUWFRDACV0ORDqEX6YT0uvX3xz6/cUnZ7GR3M5vd9+uvObNnZr5Zks2HyZlzJKvVKgAAAAA4R6N2AQAAAEBNQoAGAAAAFCBAAwAAAAoQoAEAAAAFCNAAAACAAgRoAAAAQAECNAAAAKAAARoAAABQgAANAAAAKECABgAAABQgQAMAAAAKEKABAAAABQjQAAAAgAIBahdQ4+Xk5Ozevfvy5cuXLl3SaDTh4eHR0dF/+9vfgoOD1S4NAAAA7keAriKr1bp69epvv/02OTnZaDSWejUwMHDo0KHPPPNMz549V [...]
+<p>Now we will continue to explain the use of interactions in order to test for <em>differences</em> in condition effects. We continue with the example of condition effects across three genotypes (I, II, and III).</p>
+<p>The key point to remember about designs with interaction terms is that, unlike for a design <code>~genotype + condition</code>, where the condition effect represents the <em>overall</em> effect controlling for differences due to genotype, by adding <code>genotype:condition</code>, the main condition effect only represents the effect of condition for the <em>reference level</em> of genotype (I, or whichever level was defined by the user as the reference level). The interaction terms <code>genotypeII.conditionB</code> and <code>genotypeIII.conditionB</code> give the <em>difference</em> between the condition effect for a given genotype and the condition effect for the reference genotype.</p>
+<p>This genotype-condition interaction example is examined in further detail in Example 3 in the help page for <em>results</em>, which can be found by typing <code>?results</code>. In particular, we show how to test for differences in the condition effect across genotype, and we show how to obtain the condition effect for non-reference genotypes.</p>
+<p>Note that for DESeq2 versions higher than 1.10, the <em>DESeq</em> function will turn off log fold change shrinkage (setting <code>betaPrior=FALSE</code>) for designs which contain an interaction term. Turning off the log fold change shrinkage allows the software to use standard model matrices (as would be produced by <em>model.matrix</em>), where the interaction coefficients are easier to interpret.</p>
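+<p>A sketch of fitting such an interaction design directly (assuming hypothetical <code>genotype</code> and <code>condition</code> columns in <code>colData(dds)</code>):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">design(dds) <- ~ genotype + condition + genotype:condition
+dds <- DESeq(dds)  # shrinkage is turned off automatically for this design
+resultsNames(dds)  # lists the main and interaction coefficients</code></pre></div>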
+</div>
+<div id="time-series-experiments" class="section level2">
+<h2>Time-series experiments</h2>
+<p>There are a number of ways to analyze time-series experiments, depending on the biological question of interest. In order to test for any differences over multiple time points, one can use a design including the time factor, and then test using the likelihood ratio test as described in the following section, where the time factor is removed in the reduced formula. For a control and treatment time series, one can use a design formula containing the condition factor, the time factor, and the interaction of the two; see the sketch below. In this case, using the likelihood ratio test with a reduced model which does not contain the interaction terms will test whether the condition induces a change in gene expression at any time point after the reference-level time point (time 0).</p>
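+<p>A minimal sketch of the latter analysis, assuming hypothetical <code>condition</code> and <code>time</code> factors in <code>colData(dds)</code>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># full model with interaction; the reduced model drops the interaction terms
+design(dds) <- ~ condition + time + condition:time
+dds <- DESeq(dds, test="LRT", reduced = ~ condition + time)
+res <- results(dds)</code></pre></div>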
+</div>
+<div id="likelihood-ratio-test" class="section level2">
+<h2>Likelihood ratio test</h2>
+<p>DESeq2 offers two kinds of hypothesis tests: the Wald test, where we use the estimated standard error of a log2 fold change to test if it is equal to zero, and the likelihood ratio test (LRT). The LRT examines two models for the counts, a <em>full</em> model with a certain number of terms and a <em>reduced</em> model, in which some of the terms of the <em>full</em> model are removed. The test determines if the increased likelihood of the data using the extra terms in the <em>full</em> model is more than expected if those extra terms are truly zero.</p>
+<p>The LRT is therefore useful for testing multiple terms at once, for example testing 3 or more levels of a factor at once, or all interactions between two variables. The LRT for count data is conceptually similar to an analysis of variance (ANOVA) calculation in linear regression, except that in the case of the Negative Binomial GLM, we use an analysis of deviance (ANODEV), where the <em>deviance</em> captures the difference in likelihood between a full and a reduced model.</p>
+<p>The likelihood ratio test can be performed by specifying <code>test="LRT"</code> when using the <em>DESeq</em> function, and providing a reduced design formula, e.g. one in which a number of terms from <code>design(dds)</code> are removed. The degrees of freedom for the test is obtained from the difference between the number of parameters in the two models. A simple likelihood ratio test, if the full design was <code>~condition</code> would look like:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds, <span class="dt">test=</span><span class="st">"LRT"</span>, <span class="dt">reduced=</span>~<span class="dv">1</span>)
+res <-<span class="st"> </span><span class="kw">results</span>(dds)</code></pre></div>
+<p>If the full design contained other variables, such as a batch variable, e.g. <code>~batch + condition</code> then the likelihood ratio test would look like:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds, <span class="dt">test=</span><span class="st">"LRT"</span>, <span class="dt">reduced=</span>~batch)
+res <-<span class="st"> </span><span class="kw">results</span>(dds)</code></pre></div>
+<p><a name="outlier"></a></p>
+</div>
+<div id="approach-to-count-outliers" class="section level2">
+<h2>Approach to count outliers</h2>
+<p>RNA-seq data sometimes contain isolated instances of very large counts that are apparently unrelated to the experimental or study design, and which may be considered outliers. There are many reasons why outliers can arise, including rare technical or experimental artifacts, read mapping problems in the case of genetically differing samples, and genuine, but rare biological events. In many cases, users appear primarily interested in genes that show a consistent behavior, and this is the reasoning behind the default behavior of DESeq2 described below: flagging genes with potential count outliers, or replacing the outlier counts when there are sufficient replicates.</p>
+<p>The <em>DESeq</em> function calculates, for every gene and for every sample, a diagnostic test for outliers called <em>Cook’s distance</em>. Cook’s distance is a measure of how much a single sample is influencing the fitted coefficients for a gene, and a large value of Cook’s distance is intended to indicate an outlier count. The Cook’s distances are stored as a matrix available in <code>assays(dds)[["cooks"]]</code>.</p>
+<p>The <em>results</em> function automatically flags genes which contain a Cook’s distance above a cutoff for samples which have 3 or more replicates. The <em>p</em> values and adjusted <em>p</em> values for these genes are set to <code>NA</code>. At least 3 replicates are required for flagging, as it is difficult to judge which sample might be an outlier with only 2 replicates. This filtering can be turned off with <code>results(dds, cooksCutoff=FALSE)</code>.</p>
+<p>With many degrees of freedom – i.e., many more samples than the number of parameters to be estimated – it is undesirable to remove entire genes from the analysis just because their data include a single count outlier. When there are 7 or more replicates for a given sample, the <em>DESeq</em> function will automatically replace counts with large Cook’s distance with the trimmed mean over all samples, scaled up by the size factor or normalization factor for that sample. This approach is conservative: it will not lead to false positives, as it replaces the outlier value with the value predicted by the null hypothesis.</p>
+<p>The default Cook’s distance cutoff for the two behaviors described above depends on the sample size and number of parameters to be estimated. The default is to use the 99% quantile of the F(p,m-p) distribution (with <em>p</em> the number of parameters including the intercept and <em>m</em> the number of samples). The default for gene flagging can be modified using the <code>cooksCutoff</code> argument to the <em>results</em> function. For outlier replacement, <em>DESeq</em> preserves the original counts in <code>counts(dds)</code>, saving the replacement counts as a matrix named <code>replaceCounts</code> in <code>assays(dds)</code>.</p>
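+<p>For reference, this default cutoff could be computed by hand as follows (a sketch; <code>qf</code> is the F quantile function from base R):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">m <- ncol(dds)                   # number of samples
+p <- length(resultsNames(dds))   # number of parameters, including intercept
+cooksCutoff <- qf(.99, p, m - p) # 99% quantile of the F(p, m-p) distribution</code></pre></div>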
+<p><strong>Note on many outliers:</strong> if there are very many outliers (e.g. many hundreds or thousands) reported by <code>summary(res)</code>, one might consider further exploration to see if a single sample or a few samples should be removed due to low quality. The automatic outlier filtering/replacement is most useful in situations in which the number of outliers is limited. When there are thousands of reported outliers, it might make more sense to turn off the outlier filtering/replacement (<em>DESeq</em> with <code>minReplicatesForReplace=Inf</code> and <em>results</em> with <code>cooksCutoff=FALSE</code>) and perform manual inspection: first make a PCA plot as described above to spot individual sample outliers; second, one can make a boxplot of the Cook’s distances to see if one sample is consistently higher than others (here this is not the case):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">par</span>(<span class="dt">mar=</span><span class="kw">c</span>(<span class="dv">8</span>,<span class="dv">5</span>,<span class="dv">2</span>,<span class="dv">2</span>))
+<span class="kw">boxplot</span>(<span class="kw">log10</span>(<span class="kw">assays</span>(dds)[[<span class="st">"cooks"</span>]]), <span class="dt">range=</span><span class="dv">0</span>, <span class="dt">las=</span><span class="dv">2</span>)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAMAAADNCOCpAAAC/VBMVEUAAAABAQECAgIDAwMEBAQFBQUGBgYHBwcICAgJCQkKCgoLCwsMDAwNDQ0ODg4PDw8QEBARERESEhITExMUFBQVFRUWFhYXFxcYGBgZGRkaGhobGxscHBwdHR0eHh4fHx8gICAhISEiIiIjIyMkJCQlJSUmJiYnJycoKCgpKSkqKiorKyssLCwtLS0uLi4vLy8wMDAxMTEyMjIzMzM0NDQ1NTU2NjY3Nzc4ODg5OTk6Ojo7Ozs8PDw9PT0+Pj4/Pz9AQEBBQUFCQkJDQ0NERERFRUVGRkZHR0dISEhJSUlKSkpLS0tMTExNTU1OTk5PT09QUFBRUVFSUlJTU1NUVFRVVVVWVlZXV1dYWFhZWVlaWlpbW1tcXFxdXV1eXl5fX19gYGBhYWFiYmJjY2NkZ [...]
+</div>
+<div id="dispersion-plot-and-fitting-alternatives" class="section level2">
+<h2>Dispersion plot and fitting alternatives</h2>
+<p>Plotting the dispersion estimates is a useful diagnostic. The dispersion plot below is typical, with the final estimates shrunk from the gene-wise estimates towards the fitted estimates. Some gene-wise estimates are flagged as outliers and not shrunk towards the fitted value (this outlier detection is described in the manual page for <em>estimateDispersionsMAP</em>). The amount of shrinkage can be more or less than seen here, depending on the sample size, the number of coefficients, the row mean and the variability of the gene-wise estimates.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plotDispEsts</span>(dds)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdd1xT59sG8CsJe4hsRAUUxVH33lato+696qzVWrWvo7ba1mrds4q1trWtu26to+6tWMW9QQVBEJEtKDskef9IyAZyEMGfXt9P/0gOzzl5gra98uQ+9yNSKBQgIiIiIiLTiEt6AkRERERE/0sYoImIiIiIBGCAJiIiIiISgAGaiIiIiEgABmgiIiIiIgEYoImIiIiIBGCAJiIiIiISgAGaiIiIiEgABmgiIiIiIgEYoImIiIiIBGCAJiIiIiISgAGaiIiIiEgABmgiIiIiIgEYoImIiIiIBGCAJiIiIiISgAGaiIiIiEgABmgiIiIiIgEYoImIiIiIBGCAJiIiIiISgAGaiIiIiEgABmgiIiIiIgEYoImIi [...]
+<div id="local-or-mean-dispersion-fit" class="section level3">
+<h3>Local or mean dispersion fit</h3>
+<p>A local smoothed dispersion fit is automatically substituted in the case that the parametric curve doesn’t fit the observed dispersion-mean relationship. This can be prespecified by providing the argument <code>fitType="local"</code> to either <em>DESeq</em> or <em>estimateDispersions</em>. Additionally, using the mean of gene-wise dispersion estimates as the fitted value can be specified by providing the argument <code>fitType="mean"</code>.</p>
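+<p>For example (a sketch, not evaluated here):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <- estimateDispersions(dds, fitType="local")
+# or, within the full pipeline:
+dds <- DESeq(dds, fitType="mean")</code></pre></div>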
+</div>
+<div id="supply-a-custom-dispersion-fit" class="section level3">
+<h3>Supply a custom dispersion fit</h3>
+<p>Any fitted values can be provided during dispersion estimation, using the lower-level functions described in the manual page for <em>estimateDispersionsGeneEst</em>. In the code chunk below, we store the gene-wise estimates which were already calculated and saved in the metadata column <code>dispGeneEst</code>. Then we calculate the median value of the dispersion estimates above a threshold, and save these values as the fitted dispersions, using the replacement function for <em>dispersionFunction</em>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">ddsCustom <-<span class="st"> </span>dds
+useForMedian <-<span class="st"> </span><span class="kw">mcols</span>(ddsCustom)$dispGeneEst ><span class="st"> </span><span class="fl">1e-7</span>
+medianDisp <-<span class="st"> </span><span class="kw">median</span>(<span class="kw">mcols</span>(ddsCustom)$dispGeneEst[useForMedian],
+                     <span class="dt">na.rm=</span><span class="ot">TRUE</span>)
+<span class="kw">dispersionFunction</span>(ddsCustom) <-<span class="st"> </span>function(mu) medianDisp
+ddsCustom <-<span class="st"> </span><span class="kw">estimateDispersionsMAP</span>(ddsCustom)</code></pre></div>
+<p><a name="indfilt"></a></p>
+</div>
+</div>
+<div id="independent-filtering-of-results" class="section level2">
+<h2>Independent filtering of results</h2>
+<p>The <em>results</em> function of the DESeq2 package performs independent filtering by default using the mean of normalized counts as a filter statistic. A threshold on the filter statistic is found which optimizes the number of adjusted <em>p</em> values lower than a significance level <code>alpha</code> (we use the standard variable name for significance level, though it is unrelated to the dispersion parameter <span class="math inline">\(\alpha\)</span>). The theory behind independent filtering is discussed in greater detail below. The adjusted <em>p</em> values for the genes which do not pass the filter threshold are set to <code>NA</code>.</p>
+<p>The default independent filtering is performed using the <em>filtered_p</em> function of the <a href="http://bioconductor.org/packages/genefilter">genefilter</a> package, and all of the arguments of <em>filtered_p</em> can be passed to the <em>results</em> function. The filter threshold value and the number of rejections at each quantile of the filter statistic are available as metadata of the object returned by <em>results</em>.</p>
+<p>For example, we can visualize the optimization by plotting the <code>filterNumRej</code> attribute of the results object. The <em>results</em> function maximizes the number of rejections (adjusted <em>p</em> value less than a significance level), over the quantiles of a filter statistic (the mean of normalized counts). The threshold chosen (vertical line) is the lowest quantile of the filter for which the number of rejections is within 1 residual standard deviation of the peak of a curve fit to the number of rejections over the filter quantiles:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">metadata</span>(res)$alpha</code></pre></div>
+<pre><code>## [1] 0.1</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">metadata</span>(res)$filterThreshold</code></pre></div>
+<pre><code>## 27.14286% 
+##  5.561712</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plot</span>(<span class="kw">metadata</span>(res)$filterNumRej, 
+     <span class="dt">type=</span><span class="st">"b"</span>, <span class="dt">ylab=</span><span class="st">"number of rejections"</span>,
+     <span class="dt">xlab=</span><span class="st">"quantiles of filter"</span>)
+<span class="kw">lines</span>(<span class="kw">metadata</span>(res)$lo.fit, <span class="dt">col=</span><span class="st">"red"</span>)
+<span class="kw">abline</span>(<span class="dt">v=</span><span class="kw">metadata</span>(res)$filterTheta)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOzdeVxN6R8H8O+97ZF2kiIl0oTImn4oKUwUkTW77FuWLJOxVLbCGFmykz0iu4jGrihJoYSQFto31b3n98dprlTimOpSn/fLH5zznHO/54o+PfdZeAzDEAAAAAAAfB++uAsAAAAAAPiVIEADAAAAAHCAAA0AAAAAwAECNAAAAAAABwjQAAAAAAAcIEADAAAAAHCAAA0AAAAAwAECNAAAAAAABwjQAAAAAAAcIEADAAAAAHCAAA0AAAAAwAECNAAAAAAABwjQAAAAAAAcIEADAAAAAHCAAA0AAAAAwAECNAAAAAAABwjQAAAAAAAcIEADAAAAAHCAAA0AAAAAwAECNAAAAAAABwjQAAAAAAAcIEADAAAAAHCAA [...]
+<p>Independent filtering can be turned off by setting <code>independentFiltering</code> to <code>FALSE</code>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">resNoFilt <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">independentFiltering=</span><span class="ot">FALSE</span>)
+<span class="kw">addmargins</span>(<span class="kw">table</span>(<span class="dt">filtering=</span>(res$padj <<span class="st"> </span>.<span class="dv">1</span>),
+                 <span class="dt">noFiltering=</span>(resNoFilt$padj <<span class="st"> </span>.<span class="dv">1</span>)))</code></pre></div>
+<pre><code>##          noFiltering
+## filtering FALSE TRUE  Sum
+##     FALSE  7426    0 7426
+##     TRUE    113  939 1052
+##     Sum    7539  939 8478</code></pre>
+</div>
+<div id="tests-of-log2-fold-change-above-or-below-a-threshold" class="section level2">
+<h2>Tests of log2 fold change above or below a threshold</h2>
+<p>It is also possible to provide thresholds for constructing Wald tests of significance. Two arguments to the <em>results</em> function allow for threshold-based Wald tests: <code>lfcThreshold</code>, which takes a numeric of a non-negative threshold value, and <code>altHypothesis</code>, which specifies the kind of test. Note that the <em>alternative hypothesis</em> is specified by the user, i.e. those genes which the user is interested in finding, and the test provides <em>p</em> values for the null hypothesis, the complement of the set defined by the alternative. For a given threshold <em>x</em>, the possible values of <code>altHypothesis</code> and the tests they specify are:</p>
+<ul>
+<li><code>greaterAbs</code> - <span class="math inline">\(|\beta| > x\)</span> - tests are two-tailed</li>
+<li><code>lessAbs</code> - <span class="math inline">\(|\beta| < x\)</span> - <em>p</em> values are the maximum of the upper and lower tests</li>
+<li><code>greater</code> - <span class="math inline">\(\beta > x\)</span></li>
+<li><code>less</code> - <span class="math inline">\(\beta < -x\)</span></li>
+</ul>
+<p>The test <code>altHypothesis="lessAbs"</code> requires that the user have run <em>DESeq</em> with the argument <code>betaPrior=FALSE</code>. To understand the reason for this requirement, consider that during hypothesis testing, the null hypothesis is favored unless the data provide strong evidence to reject the null. For this test, including a zero-centered prior on log fold change would favor the alternative hypothesis, shrinking log fold changes toward zero. Removing the prior on log fold changes for tests of small log fold change allows for detection of only those genes where the data alone provides evidence against the null.</p>
+<p>The four possible values of <code>altHypothesis</code> are demonstrated in the following code and visually by MA-plots in the following figures. First we run <em>DESeq</em> and specify <code>betaPrior=FALSE</code> in order to demonstrate <code>altHypothesis="lessAbs"</code>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">ddsNoPrior <-<span class="st"> </span><span class="kw">DESeq</span>(dds, <span class="dt">betaPrior=</span><span class="ot">FALSE</span>)</code></pre></div>
+<p>In order to produce results tables for the following tests, the same arguments (except <code>ylim</code>) would be provided to the <em>results</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">par</span>(<span class="dt">mfrow=</span><span class="kw">c</span>(<span class="dv">2</span>,<span class="dv">2</span>),<span class="dt">mar=</span><span class="kw">c</span>(<span class="dv">2</span>,<span class="dv">2</span>,<span class="dv">1</span>,<span class="dv">1</span>))
+yl <-<span class="st"> </span><span class="kw">c</span>(-<span class="fl">2.5</span>,<span class="fl">2.5</span>)
+resGA <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">lfcThreshold=</span>.<span class="dv">5</span>, <span class="dt">altHypothesis=</span><span class="st">"greaterAbs"</span>)
+resLA <-<span class="st"> </span><span class="kw">results</span>(ddsNoPrior, <span class="dt">lfcThreshold=</span>.<span class="dv">5</span>, <span class="dt">altHypothesis=</span><span class="st">"lessAbs"</span>)
+resG <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">lfcThreshold=</span>.<span class="dv">5</span>, <span class="dt">altHypothesis=</span><span class="st">"greater"</span>)
+resL <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">lfcThreshold=</span>.<span class="dv">5</span>, <span class="dt">altHypothesis=</span><span class="st">"less"</span>)
+<span class="kw">plotMA</span>(resGA, <span class="dt">ylim=</span>yl)
+<span class="kw">abline</span>(<span class="dt">h=</span><span class="kw">c</span>(-.<span class="dv">5</span>,.<span class="dv">5</span>),<span class="dt">col=</span><span class="st">"dodgerblue"</span>,<span class="dt">lwd=</span><span class="dv">2</span>)
+<span class="kw">plotMA</span>(resLA, <span class="dt">ylim=</span>yl)
+<span class="kw">abline</span>(<span class="dt">h=</span><span class="kw">c</span>(-.<span class="dv">5</span>,.<span class="dv">5</span>),<span class="dt">col=</span><span class="st">"dodgerblue"</span>,<span class="dt">lwd=</span><span class="dv">2</span>)
+<span class="kw">plotMA</span>(resG, <span class="dt">ylim=</span>yl)
+<span class="kw">abline</span>(<span class="dt">h=</span>.<span class="dv">5</span>,<span class="dt">col=</span><span class="st">"dodgerblue"</span>,<span class="dt">lwd=</span><span class="dv">2</span>)
+<span class="kw">plotMA</span>(resL, <span class="dt">ylim=</span>yl)
+<span class="kw">abline</span>(<span class="dt">h=</span>-.<span class="dv">5</span>,<span class="dt">col=</span><span class="st">"dodgerblue"</span>,<span class="dt">lwd=</span><span class="dv">2</span>)</code></pre></div>
+<p><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAAPACAIAAAB1tIfMAAAACXBIWXMAAB2HAAAdhwGP5fFlAAAgAElEQVR4nOydeZgdVZn/T93uTqc7nT2SBDKYZSDgBIWACmgelEUQHTHdwoCgI/xQIhEFiYwQYwQSwAwRZADxYWQZgmCgOwMqIwhhMI6PLElYAoEkpElCJ52QdOisvd/fH0UXJ3WqTp2tqk5VfT9P/3H73rO8tX3rrfe855RTLpcJAAAAAAAAQIxS2gYAAAAAAACQJeBAAwAAAAAAIAEcaAAAAAAAACSAAw0AAAAAAIAEcKABAAAAAACQAA40AAAAAAAAEsCBBgAAAAAAQAI40AAAAAAAAEgABxoAAAAAAAAJ4EADAAAAAAAgARxoAAAAAAAAJIADDQAAAAAAgARwoAEAAAAAAJAADjQAAAAAAAASwIEGAAAAAABAAjjQA [...]
+<p><a name="access"></a></p>
+</div>
+<div id="access-to-all-calculated-values" class="section level2">
+<h2>Access to all calculated values</h2>
+<p>All row-wise calculated values (intermediate dispersion calculations, coefficients, standard errors, etc.) are stored in the <em>DESeqDataSet</em> object, e.g. <code>dds</code> in this vignette. These values are accessible by calling <em>mcols</em> on <code>dds</code>. Descriptions of the columns are accessible by two calls to <em>mcols</em>. Note that the call to <code>substr</code> below is only for display purposes.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">mcols</span>(dds,<span class="dt">use.names=</span><span class="ot">TRUE</span>)[<span class="dv">1</span>:<span class="dv">4</span>,<span class="dv">1</span>:<span class="dv">4</span>]</code></pre></div>
+<pre><code>## DataFrame with 4 rows and 4 columns
+##                    gene     baseMean      baseVar   allZero
+##                <factor>    <numeric>    <numeric> <logical>
+## FBgn0000008 FBgn0000008   95.1440790 2.246236e+02     FALSE
+## FBgn0000014 FBgn0000014    1.0565722 2.962193e+00     FALSE
+## FBgn0000015 FBgn0000015    0.8467233 1.008136e+00     FALSE
+## FBgn0000017 FBgn0000017 4352.5928988 3.616417e+05     FALSE</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">substr</span>(<span class="kw">names</span>(<span class="kw">mcols</span>(dds)),<span class="dv">1</span>,<span class="dv">10</span>) </code></pre></div>
+<pre><code>##  [1] "gene"       "baseMean"   "baseVar"    "allZero"    "dispGeneEs"
+##  [6] "dispFit"    "dispersion" "dispIter"   "dispOutlie" "dispMAP"   
+## [11] "Intercept"  "condition_" "SE_Interce" "SE_conditi" "WaldStatis"
+## [16] "WaldStatis" "WaldPvalue" "WaldPvalue" "betaConv"   "betaIter"  
+## [21] "deviance"   "maxCooks"</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">mcols</span>(<span class="kw">mcols</span>(dds), <span class="dt">use.names=</span><span class="ot">TRUE</span>)[<span class="dv">1</span>:<span class="dv">4</span>,]</code></pre></div>
+<pre><code>## DataFrame with 4 rows and 2 columns
+##                  type                                   description
+##           <character>                                   <character>
+## gene            input                                              
+## baseMean intermediate     mean of normalized counts for all samples
+## baseVar  intermediate variance of normalized counts for all samples
+## allZero  intermediate                all counts for a gene are zero</code></pre>
+<p>The mean values <span class="math inline">\(\mu_{ij} = s_j q_{ij}\)</span> and the Cook’s distances for each gene and sample are stored as matrices in the assays slot:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(<span class="kw">assays</span>(dds)[[<span class="st">"mu"</span>]])</code></pre></div>
+<pre><code>##                 treated1     treated2     treated3  untreated1  untreated2
+## FBgn0000008  154.3987025   71.8640585   78.6026192  107.292174  169.007435
+## FBgn0000014    1.4994497    0.6979109    0.7633527    1.472389    2.319318
+## FBgn0000015    0.5665956    0.2637189    0.2884474    1.454158    2.290600
+## FBgn0000017 6450.3164679 3002.2656444 3283.7825771 5301.611319 8351.137808
+## FBgn0000018  658.3598354  306.4300993  335.1634866  492.700578  776.105635
+## FBgn0000024   11.4502130    5.3294410    5.8291729    6.885336   10.845833
+##               untreated3   untreated4
+## FBgn0000008   61.2260212   70.8539000
+## FBgn0000014    0.8402153    0.9723404
+## FBgn0000015    0.8298117    0.9603008
+## FBgn0000017 3025.3517513 3501.0926097
+## FBgn0000018  281.1583999  325.3709575
+## FBgn0000024    3.9291004    4.5469570</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(<span class="kw">assays</span>(dds)[[<span class="st">"cooks"</span>]])</code></pre></div>
+<pre><code>##               treated1    treated2    treated3 untreated1   untreated2
+## FBgn0000008 0.08830715 0.303802564 0.077771958 0.09822247 0.0136367583
+## FBgn0000014 1.88689792 0.218390054 0.251831283 1.88370036 0.1838377649
+## FBgn0000015 0.08092977 0.072800933 0.077912313 0.12891986 0.0028137530
+## FBgn0000017 0.01373552 0.004963502 0.002162421 0.08041054 0.0106072921
+## FBgn0000018 0.09518037 0.004725965 0.054717320 0.18464143 0.0022810730
+## FBgn0000024 0.06630034 0.131135115 0.031232734 0.27067340 0.0004894252
+##             untreated3   untreated4
+## FBgn0000008 0.18898455 0.0005301336
+## FBgn0000014 0.15380104 0.1890109982
+## FBgn0000015 0.00324020 0.1048705154
+## FBgn0000017 0.17246780 0.0550852696
+## FBgn0000018 0.07648755 0.0108795118
+## FBgn0000024 0.03105357 0.0814894716</code></pre>
+<p>The dispersions <span class="math inline">\(\alpha_i\)</span> can be accessed with the <em>dispersions</em> function.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(<span class="kw">dispersions</span>(dds))</code></pre></div>
+<pre><code>## [1] 0.03040956 2.86301787 2.20957889 0.01283362 0.01560434 0.23856732</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(<span class="kw">mcols</span>(dds)$dispersion)</code></pre></div>
+<pre><code>## [1] 0.03040956 2.86301787 2.20957889 0.01283362 0.01560434 0.23856732</code></pre>
+<p>The size factors <span class="math inline">\(s_j\)</span> are accessible via <em>sizeFactors</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">sizeFactors</span>(dds)</code></pre></div>
+<pre><code>##   treated1   treated2   treated3 untreated1 untreated2 untreated3 
+##  1.6355751  0.7612698  0.8326526  1.1382630  1.7930004  0.6495470 
+## untreated4 
+##  0.7516892</code></pre>
+<p>For advanced users, we also include a convenience function <em>coef</em> for extracting the matrix <span class="math inline">\([\beta_{ir}]\)</span> for all genes <em>i</em> and model coefficients <span class="math inline">\(r\)</span>. This function can also return a matrix of standard errors, see <code>?coef</code>. The columns of this matrix correspond to the effects returned by <em>resultsNames</em>. Note that the <em>results</em> function is best for building results tables with <em>p</em> values and adjusted <em>p</em> values.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">head</span>(<span class="kw">coef</span>(dds))</code></pre></div>
+<pre><code>##              Intercept condition_treated_vs_untreated
+## FBgn0000008  6.5585671                    0.002151683
+## FBgn0000014  0.3713251                   -0.496689957
+## FBgn0000015  0.3533500                   -1.882756713
+## FBgn0000017 12.1853813                   -0.240025055
+## FBgn0000018  8.7577334                   -0.104798934
+## FBgn0000024  2.5966931                    0.210811388</code></pre>
+<p>The beta prior variance <span class="math inline">\(\sigma_r^2\)</span> is stored as an attribute of the <em>DESeqDataSet</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">attr</span>(dds, <span class="st">"betaPriorVar"</span>)</code></pre></div>
+<pre><code>## [1] 1e+06 1e+06</code></pre>
+<p>The dispersion prior variance <span class="math inline">\(\sigma_d^2\)</span> is stored as an attribute of the dispersion function:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">dispersionFunction</span>(dds)</code></pre></div>
+<pre><code>## function (q) 
+## coefs[1] + coefs[2]/q
+## <environment: 0x9c11c08>
+## attr(,"coefficients")
+## asymptDisp  extraPois 
+## 0.01396112 2.72102337 
+## attr(,"fitType")
+## [1] "parametric"
+## attr(,"varLogDispEsts")
+## [1] 0.9891644
+## attr(,"dispPriorVar")
+## [1] 0.4988066</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">attr</span>(<span class="kw">dispersionFunction</span>(dds), <span class="st">"dispPriorVar"</span>)</code></pre></div>
+<pre><code>## [1] 0.4988066</code></pre>
+<p>The version of DESeq2 which was used to construct the <em>DESeqDataSet</em> object, or the version used when <em>DESeq</em> was run, is stored here:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">metadata</span>(dds)[[<span class="st">"version"</span>]]</code></pre></div>
+<pre><code>## [1] '1.16.1'</code></pre>
+</div>
+<div id="sample-gene-dependent-normalization-factors" class="section level2">
+<h2>Sample-/gene-dependent normalization factors</h2>
+<p>In some experiments, there might be sample- and gene-dependent biases which vary across samples. For instance, GC-content bias or length bias might vary across samples coming from different labs or processed at different times. We use the terms <em>normalization factors</em> for a gene x sample matrix, and <em>size factors</em> for a single number per sample. Incorporating normalization factors, the mean parameter <span class="math inline">\(\mu_{ij}\)</span> becomes:</p>
+<p><span class="math display">\[ \mu_{ij} = NF_{ij} q_{ij} \]</span></p>
+<p>with normalization factor matrix <em>NF</em> having the same dimensions as the counts matrix <em>K</em>. This matrix can be incorporated as shown below. We recommend providing a matrix with row-wise geometric means of 1, so that the mean of normalized counts for a gene is close to the mean of the unnormalized counts. This can be accomplished by dividing out the current row geometric means.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">normFactors <-<span class="st"> </span>normFactors /<span class="st"> </span><span class="kw">exp</span>(<span class="kw">rowMeans</span>(<span class="kw">log</span>(normFactors)))
+<span class="kw">normalizationFactors</span>(dds) <-<span class="st"> </span>normFactors</code></pre></div>
+<p>These steps then replace <em>estimateSizeFactors</em> which occurs within the <em>DESeq</em> function. The <em>DESeq</em> function will look for pre-existing normalization factors and use these in the place of size factors (and a message will be printed confirming this).</p>
+<p>The methods provided by the <a href="http://bioconductor.org/packages/cqn">cqn</a> or <a href="http://bioconductor.org/packages/EDASeq">EDASeq</a> packages can help correct for GC or length biases. They both describe in their vignettes how to create matrices which can be used by DESeq2. From the formula above, we see that normalization factors should be on the scale of the counts, like size factors, and unlike offsets which are typically on the scale of the predictors (i.e. the logarithmic scale for the negative binomial GLM). At the time of writing, the transformation from the matrices provided by these packages should be:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">cqnOffset <-<span class="st"> </span>cqnObject$glm.offset
+cqnNormFactors <-<span class="st"> </span><span class="kw">exp</span>(cqnOffset)
+EDASeqNormFactors <-<span class="st"> </span><span class="kw">exp</span>(-<span class="dv">1</span> *<span class="st"> </span>EDASeqOffset)</code></pre></div>
+</div>
+<div id="model-matrix-not-full-rank" class="section level2">
+<h2>“Model matrix not full rank”</h2>
+<p>While most experimental designs run easily using a design formula, some design formulas can cause problems and result in the <em>DESeq</em> function returning an error with the text: “the model matrix is not full rank, so the model cannot be fit as specified.” There are two main reasons for this problem: either one or more columns in the model matrix are linear combinations of other columns, or there are levels of factors or combinations of levels of multiple factors which are missing samples. We address these two problems below and discuss possible solutions:</p>
+<div id="linear-combinations" class="section level3">
+<h3>Linear combinations</h3>
+<p>The simplest case is the linear combination, or linear dependency problem, when two variables contain exactly the same information, such as in the following sample table. The software cannot fit an effect for <code>batch</code> and <code>condition</code>, because they produce identical columns in the model matrix. This is also referred to as <em>perfect confounding</em>. A unique solution of coefficients (the <span class="math inline">\(\beta_i\)</span> in the formula <a href="#theory">below</a>) is not possible.</p>
+<pre><code>## DataFrame with 4 rows and 2 columns
+##      batch condition
+##   <factor>  <factor>
+## 1        1         A
+## 2        1         A
+## 3        2         B
+## 4        2         B</code></pre>
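+<p>The rank deficiency can be checked directly; a sketch using a plain data.frame mirroring the table above:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">df <- data.frame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
+mm <- model.matrix(~ batch + condition, df)
+qr(mm)$rank < ncol(mm)  # TRUE: the batch2 and conditionB columns are identical</code></pre></div>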
+<p>Another situation which will cause problems is when the variables are not identical, but one variable can be formed by the combination of other factor levels. In the following example, the effect of batch 2 vs 1 cannot be fit because it is identical to a column in the model matrix which represents the condition C vs A effect.</p>
+<pre><code>## DataFrame with 6 rows and 2 columns
+##      batch condition
+##   <factor>  <factor>
+## 1        1         A
+## 2        1         A
+## 3        1         B
+## 4        1         B
+## 5        2         C
+## 6        2         C</code></pre>
+<p>In both of these cases above, the batch effect cannot be fit and must be removed from the model formula. There is just no way to tell apart the condition effects and the batch effects. The options are either to assume there is no batch effect (which we know is highly unlikely given the literature on batch effects in sequencing datasets) or to repeat the experiment and properly balance the conditions across batches. A balanced design would look like:</p>
+<pre><code>## DataFrame with 6 rows and 2 columns
+##      batch condition
+##   <factor>  <factor>
+## 1        1         A
+## 2        1         B
+## 3        1         C
+## 4        2         A
+## 5        2         B
+## 6        2         C</code></pre>
+<p><a name="nested-indiv"></a></p>
+</div>
+<div id="group-specific-condition-effects-individuals-nested-within-groups" class="section level3">
+<h3>Group-specific condition effects, individuals nested within groups</h3>
+<p>Finally, there is a case where we <em>can</em> in fact perform inference, but we may need to re-arrange terms to do so. Consider an experiment with grouped individuals, where we seek to test the group-specific effect of a condition or treatment, while controlling for individual effects. The individuals are nested within the groups: an individual can only be in one of the groups, although each individual has one or more observations across condition.</p>
+<p>An example of such an experiment is below:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">coldata <-<span class="st"> </span><span class="kw">DataFrame</span>(<span class="dt">grp=</span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="kw">c</span>(<span class="st">"X"</span>,<span class="st">"Y"</span>),<span class="dt">each=</span><span class="dv">6</span>)),
+                       <span class="dt">ind=</span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="dv">1</span>:<span class="dv">6</span>,<span class="dt">each=</span><span class="dv">2</span>)),
+                      <span class="dt">cnd=</span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="kw">c</span>(<span class="st">"A"</span>,<span class="st">"B"</span>),<span class="dv">6</span>)))
+coldata</code></pre></div>
+<pre><code>## DataFrame with 12 rows and 3 columns
+##          grp      ind      cnd
+##     <factor> <factor> <factor>
+## 1          X        1        A
+## 2          X        1        B
+## 3          X        2        A
+## 4          X        2        B
+## 5          X        3        A
+## ...      ...      ...      ...
+## 8          Y        4        B
+## 9          Y        5        A
+## 10         Y        5        B
+## 11         Y        6        A
+## 12         Y        6        B</code></pre>
+<p>Note that individual (<code>ind</code>) is a <em>factor</em>, not a numeric. This is very important.</p>
+<p>To make R display all the rows, we can do:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">as.data.frame</span>(coldata)</code></pre></div>
+<pre><code>##    grp ind cnd
+## 1    X   1   A
+## 2    X   1   B
+## 3    X   2   A
+## 4    X   2   B
+## 5    X   3   A
+## 6    X   3   B
+## 7    Y   4   A
+## 8    Y   4   B
+## 9    Y   5   A
+## 10   Y   5   B
+## 11   Y   6   A
+## 12   Y   6   B</code></pre>
+<p>We have two groups of samples X and Y, each with three distinct individuals (labeled here 1-6). For each individual, we have conditions A and B (for example, this could be control and treated).</p>
+<p>This design can be analyzed by DESeq2 but requires a bit of refactoring in order to fit the model terms. Here we will use a trick described in the <a href="http://bioconductor.org/packages/edgeR">edgeR</a> user guide, from the section <em>Comparisons Both Between and Within Subjects</em>. If we try to analyze with a formula such as <code>~ ind + grp*cnd</code>, we will obtain an error, because the effect for group is a linear combination of the individuals.</p>
+<p>However, the following steps allow for an analysis of group-specific condition effects, while controlling for differences in individual. For object construction, you can use a simple design, such as <code>~ ind + cnd</code>, as long as you remember to replace it before running <em>DESeq</em>. Then add a column <code>ind.n</code> which distinguishes the individuals nested within a group. Here, we add this column to coldata, but in practice you would add this column to <code>dds</code>.</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">coldata$ind.n <-<span class="st"> </span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="kw">rep</span>(<span class="dv">1</span>:<span class="dv">3</span>,<span class="dt">each=</span><span class="dv">2</span>),<span class="dv">2</span>))
+<span class="kw">as.data.frame</span>(coldata)</code></pre></div>
+<pre><code>##    grp ind cnd ind.n
+## 1    X   1   A     1
+## 2    X   1   B     1
+## 3    X   2   A     2
+## 4    X   2   B     2
+## 5    X   3   A     3
+## 6    X   3   B     3
+## 7    Y   4   A     1
+## 8    Y   4   B     1
+## 9    Y   5   A     2
+## 10   Y   5   B     2
+## 11   Y   6   A     3
+## 12   Y   6   B     3</code></pre>
+<p>Now we can reassign our <em>DESeqDataSet</em> a design of <code>~ grp + grp:ind.n + grp:cnd</code>, before we call <em>DESeq</em>. This new design will result in the following model matrix:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">model.matrix</span>(~<span class="st"> </span>grp +<span class="st"> </span>grp:ind.n +<span class="st"> </span>grp:cnd, coldata)</code></pre></div>
+<pre><code>##    (Intercept) grpY grpX:ind.n2 grpY:ind.n2 grpX:ind.n3 grpY:ind.n3
+## 1            1    0           0           0           0           0
+## 2            1    0           0           0           0           0
+## 3            1    0           1           0           0           0
+## 4            1    0           1           0           0           0
+## 5            1    0           0           0           1           0
+## 6            1    0           0           0           1           0
+## 7            1    1           0           0           0           0
+## 8            1    1           0           0           0           0
+## 9            1    1           0           1           0           0
+## 10           1    1           0           1           0           0
+## 11           1    1           0           0           0           1
+## 12           1    1           0           0           0           1
+##    grpX:cndB grpY:cndB
+## 1          0         0
+## 2          1         0
+## 3          0         0
+## 4          1         0
+## 5          0         0
+## 6          1         0
+## 7          0         0
+## 8          0         1
+## 9          0         0
+## 10         0         1
+## 11         0         0
+## 12         0         1
+## attr(,"assign")
+## [1] 0 1 2 2 2 2 3 3
+## attr(,"contrasts")
+## attr(,"contrasts")$grp
+## [1] "contr.treatment"
+## 
+## attr(,"contrasts")$ind.n
+## [1] "contr.treatment"
+## 
+## attr(,"contrasts")$cnd
+## [1] "contr.treatment"</code></pre>
+<p>Note that, if you have unbalanced numbers of individuals in the two groups, you will have zeros for some of the interactions between <code>grp</code> and <code>ind.n</code>. You can remove these columns manually from the model matrix and pass the corrected model matrix to the <code>full</code> argument of the <em>DESeq</em> function. See example code in the next section.</p>
+<p>Above, the terms <code>grpX.cndB</code> and <code>grpY.cndB</code> give the group-specific condition effects, in other words, the condition B vs A effect for group X samples, and likewise for group Y samples. These terms control for all of the six individual effects. These group-specific condition effects can be extracted using <em>results</em> with the <code>name</code> argument.</p>
+<p>Furthermore, <code>grpX.cndB</code> and <code>grpY.cndB</code> can be contrasted using the <code>contrast</code> argument, in order to test if the condition effect is different across group:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">results</span>(dds, <span class="dt">contrast=</span><span class="kw">list</span>(<span class="st">"grpY.cndB"</span>,<span class="st">"grpX.cndB"</span>))</code></pre></div>
+</div>
+<div id="levels-without-samples" class="section level3">
+<h3>Levels without samples</h3>
+<p>The base R function for creating model matrices will produce a column of zeros if a level is missing from a factor or a combination of levels is missing from an interaction of factors. The solution to the first case is to call <em>droplevels</em> on the column, which will remove levels without samples. This was shown in the beginning of this vignette.</p>
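+<p>As a reminder, dropping unused levels looks like this (with <code>condition</code> as a hypothetical factor column carrying an empty level):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># remove factor levels for which no samples remain
+dds$condition <- droplevels(dds$condition)</code></pre></div>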
+<p>The second case is also solvable, by manually editing the model matrix, and then providing this to <em>DESeq</em>. Here we construct an example dataset to illustrate:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">group <-<span class="st"> </span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="dv">1</span>:<span class="dv">3</span>,<span class="dt">each=</span><span class="dv">6</span>))
+condition <-<span class="st"> </span><span class="kw">factor</span>(<span class="kw">rep</span>(<span class="kw">rep</span>(<span class="kw">c</span>(<span class="st">"A"</span>,<span class="st">"B"</span>,<span class="st">"C"</span>),<span class="dt">each=</span><span class="dv">2</span>),<span class="dv">3</span>))
+d <-<span class="st"> </span><span class="kw">DataFrame</span>(group, condition)[-<span class="kw">c</span>(<span class="dv">17</span>,<span class="dv">18</span>),]
+<span class="kw">as.data.frame</span>(d)</code></pre></div>
+<pre><code>##    group condition
+## 1      1         A
+## 2      1         A
+## 3      1         B
+## 4      1         B
+## 5      1         C
+## 6      1         C
+## 7      2         A
+## 8      2         A
+## 9      2         B
+## 10     2         B
+## 11     2         C
+## 12     2         C
+## 13     3         A
+## 14     3         A
+## 15     3         B
+## 16     3         B</code></pre>
+<p>Note that if we try to estimate all interaction terms, we introduce a column with all zeros, as there are no condition C samples for group 3. (Here, <em>unname</em> is used to display the matrix concisely.)</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">m1 <-<span class="st"> </span><span class="kw">model.matrix</span>(~<span class="st"> </span>condition*group, d)
+<span class="kw">colnames</span>(m1)</code></pre></div>
+<pre><code>## [1] "(Intercept)"       "conditionB"        "conditionC"       
+## [4] "group2"            "group3"            "conditionB:group2"
+## [7] "conditionC:group2" "conditionB:group3" "conditionC:group3"</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">unname</span>(m1)</code></pre></div>
+<pre><code>##       [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9]
+##  [1,]    1    0    0    0    0    0    0    0    0
+##  [2,]    1    0    0    0    0    0    0    0    0
+##  [3,]    1    1    0    0    0    0    0    0    0
+##  [4,]    1    1    0    0    0    0    0    0    0
+##  [5,]    1    0    1    0    0    0    0    0    0
+##  [6,]    1    0    1    0    0    0    0    0    0
+##  [7,]    1    0    0    1    0    0    0    0    0
+##  [8,]    1    0    0    1    0    0    0    0    0
+##  [9,]    1    1    0    1    0    1    0    0    0
+## [10,]    1    1    0    1    0    1    0    0    0
+## [11,]    1    0    1    1    0    0    1    0    0
+## [12,]    1    0    1    1    0    0    1    0    0
+## [13,]    1    0    0    0    1    0    0    0    0
+## [14,]    1    0    0    0    1    0    0    0    0
+## [15,]    1    1    0    0    1    0    0    1    0
+## [16,]    1    1    0    0    1    0    0    1    0
+## attr(,"assign")
+## [1] 0 1 1 2 2 3 3 3 3
+## attr(,"contrasts")
+## attr(,"contrasts")$condition
+## [1] "contr.treatment"
+## 
+## attr(,"contrasts")$group
+## [1] "contr.treatment"</code></pre>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">all.zero <-<span class="st"> </span><span class="kw">apply</span>(m1, <span class="dv">2</span>, function(x) <span class="kw">all</span>(x==<span class="dv">0</span>))
+all.zero</code></pre></div>
+<pre><code>##       (Intercept)        conditionB        conditionC            group2 
+##             FALSE             FALSE             FALSE             FALSE 
+##            group3 conditionB:group2 conditionC:group2 conditionB:group3 
+##             FALSE             FALSE             FALSE             FALSE 
+## conditionC:group3 
+##              TRUE</code></pre>
+<p>We can remove this column like so:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">idx <-<span class="st"> </span><span class="kw">which</span>(all.zero)
+m1 <-<span class="st"> </span>m1[,-idx]
+<span class="kw">unname</span>(m1)</code></pre></div>
+<pre><code>##       [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
+##  [1,]    1    0    0    0    0    0    0    0
+##  [2,]    1    0    0    0    0    0    0    0
+##  [3,]    1    1    0    0    0    0    0    0
+##  [4,]    1    1    0    0    0    0    0    0
+##  [5,]    1    0    1    0    0    0    0    0
+##  [6,]    1    0    1    0    0    0    0    0
+##  [7,]    1    0    0    1    0    0    0    0
+##  [8,]    1    0    0    1    0    0    0    0
+##  [9,]    1    1    0    1    0    1    0    0
+## [10,]    1    1    0    1    0    1    0    0
+## [11,]    1    0    1    1    0    0    1    0
+## [12,]    1    0    1    1    0    0    1    0
+## [13,]    1    0    0    0    1    0    0    0
+## [14,]    1    0    0    0    1    0    0    0
+## [15,]    1    1    0    0    1    0    0    1
+## [16,]    1    1    0    0    1    0    0    1</code></pre>
+<p>Now this matrix <code>m1</code> can be provided to the <code>full</code> argument of <em>DESeq</em>. For a likelihood ratio test of interactions, a model matrix using a reduced design such as <code>~ condition + group</code> can be given to the <code>reduced</code> argument. Wald tests can also be generated instead of the likelihood ratio test, but for user-supplied model matrices, the argument <code>betaPrior</code> must be set to <code>FALSE</code>.</p>
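+<p>A minimal sketch of both tests with the edited matrix, assuming <code>dds</code> holds the counts for the samples in <code>d</code> (the reduced matrix <code>m2</code> is constructed here for illustration):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">m2 <- model.matrix(~ condition + group, d)  # reduced design, for illustration
+ddsLRT <- DESeq(dds, full=m1, reduced=m2, test="LRT")
+ddsWald <- DESeq(dds, full=m1, betaPrior=FALSE)  # Wald tests instead</code></pre></div>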
+<p><a name="theory"></a></p>
+</div>
+</div>
+</div>
+<div id="theory-behind-deseq2" class="section level1">
+<h1>Theory behind DESeq2</h1>
+<div id="the-deseq2-model" class="section level2">
+<h2>The DESeq2 model</h2>
+<p>The DESeq2 model and all the steps taken in the software are described in detail in our publication <span class="citation">(Love, Huber, and Anders 2014)</span>, and we include the formula and descriptions in this section as well. The differential expression analysis in DESeq2 uses a generalized linear model of the form:</p>
+<p><span class="math display">\[ K_{ij} \sim \textrm{NB}(\mu_{ij}, \alpha_i) \]</span></p>
+<p><span class="math display">\[ \mu_{ij} = s_j q_{ij} \]</span></p>
+<p><span class="math display">\[ \log_2(q_{ij}) = x_{j.} \beta_i \]</span></p>
+<p>where counts <span class="math inline">\(K_{ij}\)</span> for gene <em>i</em>, sample <em>j</em> are modeled using a negative binomial distribution with fitted mean <span class="math inline">\(\mu_{ij}\)</span> and a gene-specific dispersion parameter <span class="math inline">\(\alpha_i\)</span>. The fitted mean is composed of a sample-specific size factor <span class="math inline">\(s_j\)</span> and a parameter <span class="math inline">\(q_{ij}\)</span> proportional to the expected true concentration of fragments for sample <em>j</em>. The coefficients <span class="math inline">\(\beta_i\)</span> give the log2 fold changes for gene <em>i</em> for each column of the model matrix <span class="math inline">\(X\)</span>.</p>
+<p>The dispersion parameter <span class="math inline">\(\alpha_i\)</span> defines the relationship between the variance of the observed count and its mean value. In other words, it describes how far we expect the observed count to be from the mean value, which depends both on the size factor <span class="math inline">\(s_j\)</span> and the covariate-dependent part <span class="math inline">\(q_{ij}\)</span> as defined above.</p>
+<p><span class="math display">\[ \textrm{Var}(K_{ij}) = E[ (K_{ij} - \mu_{ij})^2 ] = \mu_{ij} + \alpha_i \mu_{ij}^2 \]</span></p>
+<p>An option in DESeq2 is to provide maximum <em>a posteriori</em> estimates of the log2 fold changes in <span class="math inline">\(\beta_i\)</span> after incorporating a zero-centered Normal prior (<code>betaPrior</code>). While previously these moderated, or shrunken, estimates were generated by the <em>DESeq</em> or <em>nbinomWaldTest</em> functions, they are now produced by the <em>lfcShrink</em> function. Dispersions are estimated using expected mean values from the maximum likelihood estimate (MLE) of log2 fold changes, and optimizing the Cox-Reid adjusted profile likelihood <span class="citation">(Cox and Reid 1987)</span>. The steps performed by the <em>DESeq</em> function are documented in its manual page; briefly, they are:</p>
+<ol style="list-style-type: decimal">
+<li>estimation of size factors <span class="math inline">\(s_j\)</span> by <em>estimateSizeFactors</em></li>
+<li>estimation of dispersion <span class="math inline">\(\alpha_i\)</span> by <em>estimateDispersions</em></li>
+<li>negative binomial GLM fitting for <span class="math inline">\(\beta_i\)</span> and Wald statistics by <em>nbinomWaldTest</em></li>
+</ol>
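+<p>Equivalently, these three steps can be run one at a time:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <- estimateSizeFactors(dds)
+dds <- estimateDispersions(dds)
+dds <- nbinomWaldTest(dds)</code></pre></div>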
+<p>For access to all the values calculated during these steps, see the section <a href="#access">above</a>.</p>
+</div>
+<div id="changes-compared-to-deseq" class="section level2">
+<h2>Changes compared to DESeq</h2>
+<p>The main changes in the package <em>DESeq2</em>, compared to the (older) version <em>DESeq</em>, are as follows:</p>
+<ul>
+<li><em>RangedSummarizedExperiment</em> is used as the superclass for storage of input data, intermediate calculations and results.</li>
+<li>Optional, maximum <em>a posteriori</em> estimation of GLM coefficients incorporating a zero-centered Normal prior with variance estimated from data (equivalent to Tikhonov/ridge regularization). This adjustment has little effect on genes with high counts, yet it helps to moderate the otherwise large variance in log2 fold change estimates for genes with low counts or highly variable counts. These estimates are now provided by the <em>lfcShrink</em> function.</li>
+<li>Maximum <em>a posteriori</em> estimation of dispersion replaces the <code>sharingMode</code> options <code>fit-only</code> or <code>maximum</code> of the previous version of the package. This is similar to the dispersion estimation methods of DSS <span class="citation">(H. Wu, Wang, and Wu 2012)</span>.</li>
+<li>All estimation and inference is based on the generalized linear model, which includes the two condition case (previously the <em>exact test</em> was used).</li>
+<li>The Wald test for significance of GLM coefficients is provided as the default inference method, with the likelihood ratio test of the previous version still available.</li>
+<li>It is possible to provide a matrix of sample-/gene-dependent normalization factors.</li>
+<li>Automatic independent filtering on the mean of normalized counts.</li>
+<li>Automatic outlier detection and handling.</li>
+</ul>
+<p><a name="changes"></a></p>
+</div>
+<div id="methods-changes-since-the-2014-deseq2-paper" class="section level2">
+<h2>Methods changes since the 2014 DESeq2 paper</h2>
+<ul>
+<li>In version 1.16 (November 2016), the log2 fold change shrinkage is no longer the default for the <em>DESeq</em> and <em>nbinomWaldTest</em> functions: the default of these is now <code>betaPrior=FALSE</code>, and a separate function <em>lfcShrink</em> performs log2 fold change shrinkage for visualization and ranking of genes. While for the majority of bulk RNA-seq experiments the LFC shrinkage did not affect statistical testing, DESeq2 has become used as an inference engine by a wider community, and certain sequencing datasets show better performance with the testing separated from the use of the LFC prior.</li>
+<li>A small change to the independent filtering routine: instead of taking the quantile of the filter (the mean of normalized counts) which directly <em>maximizes</em> the number of rejections, the threshold chosen is the lowest quantile of the filter for which the number of rejections is close to the peak of a curve fit to the number of rejections over the filter quantiles. “Close to” is defined as within 1 residual standard deviation. This change was introduced in version 1.10 (October 2015).</li>
+<li>For the calculation of the beta prior variance, instead of matching the empirical quantile to the quantile of a Normal distribution, DESeq2 now uses the weighted quantile function of the <em>Hmisc</em> package. The weighting is described in the manual page for <em>nbinomWaldTest</em>. The weights are the inverse of the expected variance of log counts (as used in the diagonals of the matrix <span class="math inline">\(W\)</span> in the GLM). The effect of the change is that the estimated prior variance is robust against noisy estimates of log fold change from genes with very small counts.</li>
+</ul>
+<p>For a list of all changes since version 1.0.0, see the <code>NEWS</code> file included in the package.</p>
+</div>
+<div id="count-outlier-detection" class="section level2">
+<h2>Count outlier detection</h2>
+<p>DESeq2 relies on the negative binomial distribution to make estimates and perform statistical inference on differences. While the negative binomial is versatile in having a mean and dispersion parameter, extreme counts in individual samples might not fit well to the negative binomial. For this reason, we perform automatic detection of count outliers. We use Cook’s distance, which is a measure of how much the fitted coefficients would change if an individual sample were removed <span class="citation">(Cook 1977)</span>. Below we plot the maximum Cook’s distance per gene over the rank of the Wald statistics for the condition, with a horizontal line at the 99% quantile of the <span class="math inline">\(F(p, m-p)\)</span> distribution:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">W <-<span class="st"> </span>res$stat
+maxCooks <-<span class="st"> </span><span class="kw">apply</span>(<span class="kw">assays</span>(dds)[[<span class="st">"cooks"</span>]],<span class="dv">1</span>,max)
+idx <-<span class="st"> </span>!<span class="kw">is.na</span>(W)
+<span class="kw">plot</span>(<span class="kw">rank</span>(W[idx]), maxCooks[idx], <span class="dt">xlab=</span><span class="st">"rank of Wald statistic"</span>, 
+     <span class="dt">ylab=</span><span class="st">"maximum Cook's distance per gene"</span>,
+     <span class="dt">ylim=</span><span class="kw">c</span>(<span class="dv">0</span>,<span class="dv">5</span>), <span class="dt">cex=</span>.<span class="dv">4</span>, <span class="dt">col=</span><span class="kw">rgb</span>(<span class="dv">0</span>,<span class="dv">0</span>,<span class="dv">0</span>,.<span class="dv">3</span>))
+m <-<span class="st"> </span><span class="kw">ncol</span>(dds)
+p <-<span class="st"> </span><span class="dv">3</span>
+<span class="kw">abline</span>(<span class="dt">h=</span><span class="kw">qf</span>(.<span class="dv">99</span>, p, m -<span class="st"> </span>p))</code></pre></div>
+<p>[Figure: maximum Cook’s distance per gene over the rank of the Wald statistic]</p>
+</div>
+<div id="contrasts-1" class="section level2">
+<h2>Contrasts</h2>
+<p>Contrasts can be calculated for a <em>DESeqDataSet</em> object for which the GLM coefficients have already been fit using the Wald test steps (<em>DESeq</em> with <code>test="Wald"</code> or using <em>nbinomWaldTest</em>). The vector of coefficients <span class="math inline">\(\beta\)</span> is left multiplied by the contrast vector <span class="math inline">\(c\)</span> to form the numerator of the test statistic. The denominator is formed by multiplying the covariance matrix <span class="math inline">\(\Sigma\)</span> for the coefficients on either side by the contrast vector <span class="math inline">\(c\)</span>; the square root of this product is the standard error, giving the Wald statistic:</p>
+<p><span class="math display">\[ W = \frac{c^t \beta}{\sqrt{c^t \Sigma c}} \]</span></p>
+</div>
+<div id="expanded-model-matrices" class="section level2">
+<h2>Expanded model matrices</h2>
+<p>DESeq2 uses <em>expanded model matrices</em> in conjunction with the log2 fold change prior, in order to produce shrunken log2 fold change estimates and test results which are independent of the choice of reference level. Another way of saying this is that the shrinkage is <em>symmetric</em> with respect to all the levels of the factors in the design. The expanded model matrices differ from the standard model matrices in that they have an indicator column (and therefore a coefficient) for each level of factors in the design formula in addition to an intercept.</p>
+<p>The expanded model matrices are not full rank, but a coefficient vector <span class="math inline">\(\beta_i\)</span> can still be found due to the zero-centered prior on non-intercept coefficients. The prior variance for the log2 fold changes is calculated by first generating maximum likelihood estimates for a standard model matrix. The prior variance for each level of a factor is then set as the average of the mean squared maximum likelihood estimates for each level and every possible contrast of levels.</p>
+<p><a name="indfilttheory"></a></p>
+</div>
+<div id="independent-filtering-and-multiple-testing" class="section level2">
+<h2>Independent filtering and multiple testing</h2>
+<div id="filtering-criteria" class="section level3">
+<h3>Filtering criteria</h3>
+<p>The goal of independent filtering is to filter out those tests from the procedure that have little or no chance of showing significant evidence, without even looking at their test statistic. Typically, this results in increased detection power at the same experiment-wide type I error. Here, we measure experiment-wide type I error in terms of the false discovery rate.</p>
+<p>A good choice for a filtering criterion is one that</p>
+<ol style="list-style-type: decimal">
+<li>is statistically independent from the test statistic under the null hypothesis,</li>
+<li>is correlated with the test statistic under the alternative, and</li>
+<li>does not notably change the dependence structure – if there is any – between the tests that pass the filter, compared to the dependence structure between the tests before filtering.</li>
+</ol>
+<p>The benefit from filtering relies on property (2), and we will explore it further below. Its statistical validity relies on property (1) – which is simple to formally prove for many combinations of filter criteria with test statistics – and property (3), which is harder to establish theoretically from first principles but rarely a problem in practice. We refer to <span class="citation">(Bourgon, Gentleman, and Huber 2010)</span> for further discussion of this topic.</p>
+<p>A simple filtering criterion readily available in the results object is the mean of normalized counts irrespective of biological condition, and so this is the criterion which is used automatically by the <em>results</em> function to perform independent filtering. Genes with very low counts are not likely to see significant differences, typically due to high dispersion. For example, we can plot the <span class="math inline">\(-\log_{10}\)</span> <em>p</em> values from all genes over the mean of normalized counts:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">plot</span>(res$baseMean<span class="dv">+1</span>, -<span class="kw">log10</span>(res$pvalue),
+     <span class="dt">log=</span><span class="st">"x"</span>, <span class="dt">xlab=</span><span class="st">"mean of normalized counts"</span>,
+     <span class="dt">ylab=</span><span class="kw">expression</span>(-log[<span class="dv">10</span>](pvalue)),
+     <span class="dt">ylim=</span><span class="kw">c</span>(<span class="dv">0</span>,<span class="dv">30</span>),
+     <span class="dt">cex=</span>.<span class="dv">4</span>, <span class="dt">col=</span><span class="kw">rgb</span>(<span class="dv">0</span>,<span class="dv">0</span>,<span class="dv">0</span>,.<span class="dv">3</span>))</code></pre></div>
+<p>[Figure: -log10 <em>p</em> values over the mean of normalized counts]</p>
+</div>
+<div id="why-does-it-work" class="section level3">
+<h3>Why does it work?</h3>
+<p>Consider the <em>p</em> value histogram below. It shows how the filtering ameliorates the multiple testing problem – and thus the severity of a multiple testing adjustment – by removing a background set of hypotheses whose <em>p</em> values are distributed more or less uniformly in [0,1].</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">use <-<span class="st"> </span>res$baseMean ><span class="st"> </span><span class="kw">metadata</span>(res)$filterThreshold
+h1 <-<span class="st"> </span><span class="kw">hist</span>(res$pvalue[!use], <span class="dt">breaks=</span><span class="dv">0</span>:<span class="dv">50</span>/<span class="dv">50</span>, <span class="dt">plot=</span><span class="ot">FALSE</span>)
+h2 <-<span class="st"> </span><span class="kw">hist</span>(res$pvalue[use], <span class="dt">breaks=</span><span class="dv">0</span>:<span class="dv">50</span>/<span class="dv">50</span>, <span class="dt">plot=</span><span class="ot">FALSE</span>)
+colori <-<span class="st"> </span><span class="kw">c</span>(<span class="st">`</span><span class="dt">do not pass</span><span class="st">`</span>=<span class="st">"khaki"</span>, <span class="st">`</span><span class="dt">pass</span><span class="st">`</span>=<span class="st">"powderblue"</span>)</code></pre></div>
+<p>Histogram of p values for all tests. The area shaded in blue indicates the subset of those that pass the filtering, the area in khaki those that do not pass:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">barplot</span>(<span class="dt">height =</span> <span class="kw">rbind</span>(h1$counts, h2$counts), <span class="dt">beside =</span> <span class="ot">FALSE</span>,
+        <span class="dt">col =</span> colori, <span class="dt">space =</span> <span class="dv">0</span>, <span class="dt">main =</span> <span class="st">""</span>, <span class="dt">ylab=</span><span class="st">"frequency"</span>)
+<span class="kw">text</span>(<span class="dt">x =</span> <span class="kw">c</span>(<span class="dv">0</span>, <span class="kw">length</span>(h1$counts)), <span class="dt">y =</span> <span class="dv">0</span>, <span class="dt">label =</span> <span class="kw">paste</span>(<span class="kw">c</span>(<span class="dv">0</span>,<span class="dv">1</span>)),
+     <span class="dt">adj =</span> <span class="kw">c</span>(<span class="fl">0.5</span>,<span class="fl">1.7</span>), <span class="dt">xpd=</span><span class="ot">NA</span>)
+<span class="kw">legend</span>(<span class="st">"topright"</span>, <span class="dt">fill=</span><span class="kw">rev</span>(colori), <span class="dt">legend=</span><span class="kw">rev</span>(<span class="kw">names</span>(colori)))</code></pre></div>
+<p>[Figure: histogram of <em>p</em> values for all tests; blue indicates tests that pass the filtering, khaki those that do not]</p>
+<p><a name="FAQ"></a></p>
+</div>
+</div>
+</div>
+<div id="frequently-asked-questions" class="section level1">
+<h1>Frequently asked questions</h1>
+<div id="how-can-i-get-support-for-deseq2" class="section level2">
+<h2>How can I get support for DESeq2?</h2>
+<p>We welcome questions about our software, and want to ensure that we eliminate issues if and when they appear. We have a few requests to optimize the process:</p>
+<ul>
+<li>all questions should take place on the Bioconductor support site: <a href="https://support.bioconductor.org" class="uri">https://support.bioconductor.org</a>, which serves as a repository of questions and answers. This helps to save the developers’ time in responding to similar questions. Make sure to tag your post with <code>deseq2</code>. It is often very helpful in addition to describe the aim of your experiment.</li>
+<li>before posting, first search the Bioconductor support site mentioned above for past threads which might have answered your question.</li>
+<li>if you have a question about the behavior of a function, read the sections of the manual page for this function by typing a question mark and the function name, e.g. <code>?results</code>. We spend a lot of time documenting individual functions and the exact steps that the software is performing.</li>
+<li>include all of your R code, especially the creation of the <em>DESeqDataSet</em> and the design formula. Include complete warning or error messages, and conclude your message with the full output of <code>sessionInfo()</code>.</li>
+<li>if possible, include the output of <code>as.data.frame(colData(dds))</code>, so that we can have a sense of the experimental setup. If this contains confidential information, you can replace the levels of those factors using <em>levels()</em>.</li>
+</ul>
+</div>
+<div id="why-are-some-p-values-set-to-na" class="section level2">
+<h2>Why are some <em>p</em> values set to NA?</h2>
+<p>See the details <a href="#pvaluesNA">above</a>.</p>
+</div>
+<div id="how-can-i-get-unfiltered-deseq2-results" class="section level2">
+<h2>How can I get unfiltered DESeq2 results?</h2>
+<p>Users can obtain unfiltered GLM results, i.e. without outlier removal or independent filtering with the following call:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <-<span class="st"> </span><span class="kw">DESeq</span>(dds, <span class="dt">minReplicatesForReplace=</span><span class="ot">Inf</span>)
+res <-<span class="st"> </span><span class="kw">results</span>(dds, <span class="dt">cooksCutoff=</span><span class="ot">FALSE</span>, <span class="dt">independentFiltering=</span><span class="ot">FALSE</span>)</code></pre></div>
+<p>In this case, the only <em>p</em> values set to <code>NA</code> are those from genes with all counts equal to zero.</p>
+</div>
+<div id="how-do-i-use-vst-or-rlog-data-for-differential-testing" class="section level2">
+<h2>How do I use VST or rlog data for differential testing?</h2>
+<p>The variance stabilizing and rlog transformations are provided for applications other than differential testing, for example clustering of samples or other machine learning applications. For differential testing we recommend the <em>DESeq</em> function applied to raw counts as outlined <a href="#de">above</a>.</p>
+</div>
+<div id="can-i-use-deseq2-to-analyze-paired-samples" class="section level2">
+<h2>Can I use DESeq2 to analyze paired samples?</h2>
+<p>Yes, you should use a multi-factor design which includes the sample information as a term in the design formula. This will account for differences between the samples while estimating the effect due to the condition. The condition of interest should go at the end of the design formula, e.g. <code>~ subject + condition</code>.</p>
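+<p>A minimal sketch, assuming hypothetical <code>cts</code> (count matrix) and <code>coldata</code> (with <code>subject</code> and <code>condition</code> columns):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># cts and coldata are assumed inputs, for illustration only
+dds <- DESeqDataSetFromMatrix(countData=cts, colData=coldata,
+                              design=~ subject + condition)</code></pre></div>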
+</div>
+<div id="if-i-have-multiple-groups-should-i-run-all-together-or-split-into-pairs-of-groups" class="section level2">
+<h2>If I have multiple groups, should I run all together or split into pairs of groups?</h2>
+<p>Typically, we recommend that users run samples from all groups together, and then use the <code>contrast</code> argument of the <em>results</em> function to extract comparisons of interest after fitting the model using <em>DESeq</em>.</p>
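+<p>For example, after fitting across all groups, individual comparisons can be extracted (a sketch assuming a factor <code>group</code> with levels A, B, C):</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <- DESeq(dds)
+res.BvsA <- results(dds, contrast=c("group","B","A"))
+res.CvsA <- results(dds, contrast=c("group","C","A"))</code></pre></div>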
+<p>The model fit by <em>DESeq</em> estimates a single dispersion parameter for each gene, which defines how far we expect the observed count for a sample to be from the mean value from the model given its size factor and its condition group. See the section <a href="#theory">above</a> and the DESeq2 paper for full details. Having a single dispersion parameter for each gene is usually sufficient for analyzing multi-group data, as the final dispersion value will incorporate the within-group variability across all groups.</p>
+<p>However, for some datasets, exploratory data analysis (EDA) plots could reveal that one or more groups has much higher within-group variability than the others. A simulated example of such a set of samples is shown below. This is a case where, by comparing groups A and B separately – subsetting a <em>DESeqDataSet</em> to only samples from those two groups and then running <em>DESeq</em> on this subset – one will obtain a more sensitive analysis than with a model including all samples together. It should be noted that such an extreme range of within-group variability is not common in practice.</p>
+<p>Here we diagram an extreme range of within-group variability with a simulated dataset. Typically, it is recommended to run <em>DESeq</em> across samples from all groups, for datasets with multiple groups. However, this simulated dataset shows a case where it would be preferable to compare groups A and B by creating a smaller dataset without the C samples. Group C has much higher within-group variability, which would inflate the per-gene dispersion estimate for groups A and B as well:</p>
+<p>[Figure: simulated counts for groups A, B and C, with much higher within-group variability in group C]</p>
+</div>
+<div id="can-i-run-deseq2-to-contrast-the-levels-of-many-groups" class="section level2">
+<h2>Can I run DESeq2 to contrast the levels of many groups?</h2>
+<p>DESeq2 will work with any kind of design specified using the R formula. We encourage users to consider exploratory data analysis such as principal components analysis rather than performing statistical testing of all pairs of many groups of samples. Statistical testing is one of many ways of describing differences between samples.</p>
+<p>Regarding multiple test correction, if a user is planning to contrast all pairs of many levels, and then selectively report the results of only a <em>subset</em> of those pairs, one needs to perform multiple testing across <em>contrasts</em> as well as genes to control for this additional form of multiple testing. This can be done by using the <code>p.adjust</code> function across a long vector of <em>p</em> values from all pairs of contrasts, then re-assigning these adjusted <em>p</em> values to the appropriate results tables.</p>
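+<p>A minimal sketch of such a correction, assuming hypothetical results tables <code>resAB</code>, <code>resAC</code>, and <code>resBC</code> of equal length:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"># adjust across all contrasts and genes together, then re-assign
+pvals <- c(resAB$pvalue, resAC$pvalue, resBC$pvalue)
+padj <- p.adjust(pvals, method="BH")
+n <- nrow(resAB)
+resAB$padj <- padj[seq_len(n)]
+resAC$padj <- padj[n + seq_len(n)]
+resBC$padj <- padj[2*n + seq_len(n)]</code></pre></div>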
+<p>Regarding the speed of fitting very large models, note that each additional level of a factor in the design formula adds another parameter to the GLM which is fit by DESeq2. Users might consider first removing genes with very few reads, e.g. genes with a row sum of 1, as this will speed up the fitting procedure.</p>
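+<p>For example, to remove genes with a row sum of 1 or less before running <em>DESeq</em>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">dds <- dds[ rowSums(counts(dds)) > 1, ]</code></pre></div>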
+</div>
+<div id="can-i-use-deseq2-to-analyze-a-dataset-without-replicates" class="section level2">
+<h2>Can I use DESeq2 to analyze a dataset without replicates?</h2>
+<p>If a <em>DESeqDataSet</em> is provided with an experimental design without replicates, a warning is printed that the samples are treated as replicates for estimation of dispersion. This kind of analysis is only useful for exploring the data, and will not provide proper statistical inference on differences between groups. Without biological replicates, it is not possible to estimate the biological variability of each gene. More details can be found in the manual page for <em>DESeq</em>.</p>
+</div>
+<div id="how-can-i-include-a-continuous-covariate-in-the-design-formula" class="section level2">
+<h2>How can I include a continuous covariate in the design formula?</h2>
+<p>Continuous covariates can be included in the design formula in exactly the same manner as factorial covariates, and then <em>results</em> for the continuous covariate can be extracted by specifying <code>name</code>. Continuous covariates might make sense in certain experiments, where a constant fold change might be expected for each unit of the covariate. However, in many cases, more meaningful results can be obtained by cutting continuous covariates into a factor defined over a small number of bins (e.g. 3-5).</p>
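+<p>A minimal sketch, assuming a hypothetical numeric column <code>age</code> in <code>colData(dds)</code>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">design(dds) <- ~ age + condition      # continuous covariate as-is
+dds$ageBin <- cut(dds$age, breaks=3)  # or cut into a small number of bins
+design(dds) <- ~ ageBin + condition</code></pre></div>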
+</div>
+<div id="i-ran-a-likelihood-ratio-test-but-results-only-gives-me-one-comparison." class="section level2">
+<h2>I ran a likelihood ratio test, but results() only gives me one comparison.</h2>
+<p>“… How do I get the <em>p</em> values for all of the variables/levels that were removed in the reduced design?”</p>
+<p>This is explained in the help page for <code>?results</code> in the section about likelihood ratio test p-values, but we will restate the answer here. When one performs a likelihood ratio test, the <em>p</em> values and the test statistic (the <code>stat</code> column) are values for the test that removes all of the variables which are present in the full design and not in the reduced design. This tests the null hypothesis that all the coefficients from these variables and levels of these factors are equal to zero.</p>
+<p>The likelihood ratio test <em>p</em> values therefore represent a test of <em>all the variables and all the levels of factors</em> which are among these variables. However, the results table only has space for one column of log fold change, so a single variable and a single comparison is shown (among the potentially multiple log2 fold changes which were tested in the likelihood ratio test). This is indicated at the top of the results table with the text, e.g., “log2 fold change (MLE): condition C vs A”, while the <em>p</em> value refers to the test of all the variables and levels removed in the reduced design.</p>
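+<p>A minimal sketch, assuming a full design of <code>~ batch + condition</code>:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">ddsLRT <- DESeq(dds, test="LRT", reduced=~ batch)
+resLRT <- results(ddsLRT)  # p values test all levels of condition at once</code></pre></div>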
+</div>
+<div id="what-are-the-exact-steps-performed-by-deseq" class="section level2">
+<h2>What are the exact steps performed by DESeq()?</h2>
+<p>See the manual page for <em>DESeq</em>, which links to the subfunctions which are called in order, where complete details are listed. You can also read the three steps listed in <a href="#theory">the DESeq2 model</a> section of this document.</p>
+</div>
+<div id="is-there-an-official-galaxy-tool-for-deseq2" class="section level2">
+<h2>Is there an official Galaxy tool for DESeq2?</h2>
+<p>Yes. The repository for the DESeq2 tool is</p>
+<p><a href="https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2" class="uri">https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2</a></p>
+<p>and a link to its location in the Tool Shed is</p>
+<p><a href="https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab" class="uri">https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab</a>.</p>
+</div>
+<div id="i-want-to-benchmark-deseq2-comparing-to-other-de-tools." class="section level2">
+<h2>I want to benchmark DESeq2 comparing to other DE tools.</h2>
+<p>One aspect which can cause problems for comparison is that, by default, DESeq2 outputs <code>NA</code> values for adjusted <em>p</em> values based on independent filtering of genes which have low counts. This is a way for DESeq2 to give extra information on why the adjusted <em>p</em> value for this gene is not small. Additionally, <em>p</em> values can be set to <code>NA</code> based on extreme count outlier detection. These <code>NA</code> values should be considered <em>negatives</em> for the purposes of benchmarking; alternatively, the following code can be used to set the <code>NA</code> adjusted <em>p</em> values to 1:</p>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">res$padj <-<span class="st"> </span><span class="kw">ifelse</span>(<span class="kw">is.na</span>(res$padj), <span class="dv">1</span>, res$padj)</code></pre></div>
+</div>
+<div id="i-have-trouble-installing-deseq2-on-ubuntulinux" class="section level2">
+<h2>I have trouble installing DESeq2 on Ubuntu/Linux…</h2>
+<p>“<em>I try to install DESeq2 using biocLite(), but I get an error trying to install the R packages XML and/or RCurl:</em>”</p>
+<p><code>ERROR: configuration failed for package XML</code></p>
+<p><code>ERROR: configuration failed for package RCurl</code></p>
+<p>You need to install the following devel versions of packages using your standard package manager, e.g. <code>sudo apt-get install</code> or <code>sudo apt install</code></p>
+<ul>
+<li>libxml2-dev</li>
+<li>libcurl4-openssl-dev</li>
+</ul>
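+<p>For example, on Ubuntu (assuming <code>apt-get</code> is available):</p>
+<p><code>sudo apt-get install libxml2-dev libcurl4-openssl-dev</code></p>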
+</div>
+</div>
+<div id="acknowledgments" class="section level1">
+<h1>Acknowledgments</h1>
+<p>We have benefited in the development of DESeq2 from the help and feedback of many individuals, including but not limited to:</p>
+<p>The Bioconductor Core Team, Alejandro Reyes, Andrzej Oles, Aleksandra Pekowska, Felix Klein, Nikolaos Ignatiadis, Vince Carey, Owen Solberg, Ruping Sun, Devon Ryan, Steve Lianoglou, Jessica Larson, Christina Chaivorapol, Pan Du, Richard Bourgon, Willem Talloen, Elin Videvall, Hanneke van Deutekom, Todd Burwell, Jesse Rowley, Igor Dolgalev, Stephen Turner, Ryan C Thompson, Tyr Wiesner-Hanks, Konrad Rudolph, David Robinson, Mingxiang Teng, Mathias Lesche, Sonali Arora, Jordan Ramilowski, and many others.</p>
+</div>
+<div id="session-info" class="section level1">
+<h1>Session info</h1>
+<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r"><span class="kw">sessionInfo</span>()</code></pre></div>
+<pre><code>## R version 3.4.0 (2017-04-21)
+## Platform: x86_64-pc-linux-gnu (64-bit)
+## Running under: Ubuntu 16.04.2 LTS
+## 
+## Matrix products: default
+## BLAS: /home/biocbuild/bbs-3.5-bioc/R/lib/libRblas.so
+## LAPACK: /home/biocbuild/bbs-3.5-bioc/R/lib/libRlapack.so
+## 
+## locale:
+##  [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C              
+##  [3] LC_TIME=en_US.UTF-8        LC_COLLATE=C              
+##  [5] LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
+##  [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                 
+##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
+## [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
+## 
+## attached base packages:
+## [1] parallel  stats4    stats     graphics  grDevices utils     datasets 
+## [8] methods   base     
+## 
+## other attached packages:
+##  [1] RColorBrewer_1.1-2         pheatmap_1.0.8            
+##  [3] hexbin_1.27.1              vsn_3.44.0                
+##  [5] ggplot2_2.2.1              IHW_1.4.0                 
+##  [7] airway_0.110.0             pasilla_1.4.0             
+##  [9] DESeq2_1.16.1              SummarizedExperiment_1.6.1
+## [11] DelayedArray_0.2.1         matrixStats_0.52.2        
+## [13] Biobase_2.36.2             GenomicRanges_1.28.1      
+## [15] GenomeInfoDb_1.12.0        IRanges_2.10.0            
+## [17] S4Vectors_0.14.0           BiocGenerics_0.22.0       
+## [19] tximportData_1.4.0         readr_1.1.0               
+## [21] tximport_1.4.0            
+## 
+## loaded via a namespace (and not attached):
+##  [1] splines_3.4.0           Formula_1.2-1          
+##  [3] affy_1.54.0             latticeExtra_0.6-28    
+##  [5] GenomeInfoDbData_0.99.0 slam_0.1-40            
+##  [7] yaml_2.1.14             RSQLite_1.1-2          
+##  [9] backports_1.0.5         lattice_0.20-35        
+## [11] limma_3.32.2            digest_0.6.12          
+## [13] XVector_0.16.0          checkmate_1.8.2        
+## [15] colorspace_1.3-2        preprocessCore_1.38.1  
+## [17] htmltools_0.3.6         Matrix_1.2-10          
+## [19] plyr_1.8.4              XML_3.98-1.7           
+## [21] genefilter_1.58.1       zlibbioc_1.22.0        
+## [23] xtable_1.8-2            scales_0.4.1           
+## [25] affyio_1.46.0           fdrtool_1.2.15         
+## [27] BiocParallel_1.10.1     htmlTable_1.9          
+## [29] tibble_1.3.0            annotate_1.54.0        
+## [31] nnet_7.3-12             lazyeval_0.2.0         
+## [33] survival_2.41-3         magrittr_1.5           
+## [35] memoise_1.1.0           evaluate_0.10          
+## [37] foreign_0.8-68          BiocInstaller_1.26.0   
+## [39] tools_3.4.0             data.table_1.10.4      
+## [41] hms_0.3                 BiocStyle_2.4.0        
+## [43] stringr_1.2.0           munsell_0.4.3          
+## [45] locfit_1.5-9.1          cluster_2.0.6          
+## [47] AnnotationDbi_1.38.0    compiler_3.4.0         
+## [49] grid_3.4.0              RCurl_1.95-4.8         
+## [51] rjson_0.2.15            htmlwidgets_0.8        
+## [53] labeling_0.3            bitops_1.0-6           
+## [55] base64enc_0.1-3         rmarkdown_1.5          
+## [57] gtable_0.2.0            codetools_0.2-15       
+## [59] DBI_0.6-1               R6_2.2.0               
+## [61] gridExtra_2.2.1         knitr_1.15.1           
+## [63] Hmisc_4.0-3             rprojroot_1.2          
+## [65] lpsymphony_1.4.1        stringi_1.1.5          
+## [67] Rcpp_0.12.10            geneplotter_1.54.0     
+## [69] rpart_4.1-11            acepack_1.4.1</code></pre>
+</div>
+<div id="references" class="section level1 unnumbered">
+<h1>References</h1>
+<div id="refs" class="references">
+<div id="ref-Anders:2010:GB">
+<p>Anders, Simon, and Wolfgang Huber. 2010. “Differential Expression Analysis for Sequence Count Data.” <em>Genome Biology</em> 11: R106. <a href="http://genomebiology.com/2010/11/10/R106" class="uri">http://genomebiology.com/2010/11/10/R106</a>.</p>
+</div>
+<div id="ref-Anders:2014:htseq">
+<p>Anders, Simon, Paul Theodor Pyl, and Wolfgang Huber. 2014. “HTSeq – A Python framework to work with high-throughput sequencing data.” <em>Bioinformatics</em>. <a href="http://dx.doi.org/10.1093/bioinformatics/btu638" class="uri">http://dx.doi.org/10.1093/bioinformatics/btu638</a>.</p>
+</div>
+<div id="ref-Bourgon:2010:PNAS">
+<p>Bourgon, Richard, Robert Gentleman, and Wolfgang Huber. 2010. “Independent Filtering Increases Detection Power for High-Throughput Experiments.” <em>PNAS</em> 107 (21): 9546–51. <a href="http://www.pnas.org/content/107/21/9546.long" class="uri">http://www.pnas.org/content/107/21/9546.long</a>.</p>
+</div>
+<div id="ref-Bray2016Near">
+<p>Bray, Nicolas, Harold Pimentel, Pall Melsted, and Lior Pachter. 2016. “Near-Optimal Probabilistic Rna-Seq Quantification.” <em>Nature Biotechnology</em> 34: 525–27. <a href="http://dx.doi.org/10.1038/nbt.3519" class="uri">http://dx.doi.org/10.1038/nbt.3519</a>.</p>
+</div>
+<div id="ref-Brooks2010">
+<p>Brooks, A. N., L. Yang, M. O. Duff, K. D. Hansen, J. W. Park, S. Dudoit, S. E. Brenner, and B. R. Graveley. 2011. “Conservation of an RNA regulatory map between Drosophila and mammals.” <em>Genome Research</em>, 193–202. doi:<a href="https://doi.org/10.1101/gr.108662.110">10.1101/gr.108662.110</a>.</p>
+</div>
+<div id="ref-Cook1977Detection">
+<p>Cook, R. Dennis. 1977. “Detection of Influential Observation in Linear Regression.” <em>Technometrics</em>, February.</p>
+</div>
+<div id="ref-CR">
+<p>Cox, D. R., and N. Reid. 1987. “Parameter orthogonality and approximate conditional inference.” <em>Journal of the Royal Statistical Society, Series B</em> 49 (1): 1–39. <a href="http://www.jstor.org/stable/2345476" class="uri">http://www.jstor.org/stable/2345476</a>.</p>
+</div>
+<div id="ref-sagmb2003">
+<p>Huber, Wolfgang, Anja von Heydebreck, Holger Sültmann, Annemarie Poustka, and Martin Vingron. 2003. “Parameter Estimation for the Calibration and Variance Stabilization of Microarray Data.” <em>Statistical Applications in Genetics and Molecular Biology</em> 2 (1): Article 3.</p>
+</div>
+<div id="ref-Ignatiadis2015">
+<p>Ignatiadis, Nikolaos, Bernd Klaus, Judith Zaugg, and Wolfgang Huber. 2015. “Data-Driven Hypothesis Weighting Increases Detection Power in Big Data Analytics.” <em>BioRxiv</em>. <a href="http://dx.doi.org/10.1101/034330" class="uri">http://dx.doi.org/10.1101/034330</a>.</p>
+</div>
+<div id="ref-Li2011RSEM">
+<p>Li, Bo, and Colin N. Dewey. 2011. “RSEM: accurate transcript quantification from RNA-Seq data with or without a reference genome.” <em>BMC Bioinformatics</em> 12: 323. doi:<a href="https://doi.org/10.1186/1471-2105-12-323">10.1186/1471-2105-12-323</a>.</p>
+</div>
+<div id="ref-Liao2013feature">
+<p>Liao, Y., G. K. Smyth, and W. Shi. 2013. “featureCounts: an efficient general purpose program for assigning sequence reads to genomic features.” <em>Bioinformatics</em>, November.</p>
+</div>
+<div id="ref-Love2014">
+<p>Love, Michael I., Wolfgang Huber, and Simon Anders. 2014. “Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2.” <em>Genome Biology</em> 15 (12): 550. <a href="http://dx.doi.org/10.1186/s13059-014-0550-8" class="uri">http://dx.doi.org/10.1186/s13059-014-0550-8</a>.</p>
+</div>
+<div id="ref-Patro2016Salmon">
+<p>Patro, Rob, Geet Duggal, Michael I. Love, Rafael A. Irizarry, and Carl Kingsford. 2016. “Salmon Provides Accurate, Fast, and Bias-Aware Transcript Expression Estimates Using Dual-Phase Inference.” <em>BioRxiv</em>. <a href="http://biorxiv.org/content/early/2016/08/30/021592" class="uri">http://biorxiv.org/content/early/2016/08/30/021592</a>.</p>
+</div>
+<div id="ref-Patro2014Sailfish">
+<p>Patro, Rob, Stephen M. Mount, and Carl Kingsford. 2014. “Sailfish enables alignment-free isoform quantification from RNA-seq reads using lightweight algorithms.” <em>Nature Biotechnology</em> 32: 462–64. <a href="http://dx.doi.org/10.1038/nbt.2862" class="uri">http://dx.doi.org/10.1038/nbt.2862</a>.</p>
+</div>
+<div id="ref-Robert2015Errors">
+<p>Robert, Christelle, and Mick Watson. 2015. “Errors in RNA-Seq quantification affect genes of relevance to human disease.” <em>Genome Biology</em>. doi:<a href="https://doi.org/10.1186/s13059-015-0734-x">10.1186/s13059-015-0734-x</a>.</p>
+</div>
+<div id="ref-Soneson2015">
+<p>Soneson, Charlotte, Michael I. Love, and Mark Robinson. 2015. “Differential analyses for RNA-seq: transcript-level estimates improve gene-level inferences.” <em>F1000Research</em> 4 (1521). <a href="http://dx.doi.org/10.12688/f1000research.7563.1" class="uri">http://dx.doi.org/10.12688/f1000research.7563.1</a>.</p>
+</div>
+<div id="ref-Tibshirani1988">
+<p>Tibshirani, Robert. 1988. “Estimating Transformations for Regression via Additivity and Variance Stabilization.” <em>Journal of the American Statistical Association</em> 83: 394–405.</p>
+</div>
+<div id="ref-Trapnell2013Differential">
+<p>Trapnell, Cole, David G Hendrickson, Martin Sauvageau, Loyal Goff, John L Rinn, and Lior Pachter. 2013. “Differential analysis of gene regulation at transcript resolution with RNA-seq.” <em>Nature Biotechnology</em>. doi:<a href="https://doi.org/10.1038/nbt.2450">10.1038/nbt.2450</a>.</p>
+</div>
+<div id="ref-Wu2012New">
+<p>Wu, Hao, Chi Wang, and Zhijin Wu. 2012. “A new shrinkage estimator for dispersion improves differential expression detection in RNA-seq data.” <em>Biostatistics</em>, September. Oxford University Press. doi:<a href="https://doi.org/10.1093/biostatistics/kxs033">10.1093/biostatistics/kxs033</a>.</p>
+</div>
+</div>
+</div>
+
+
+
+
+</div>
+
+<script>
+
+// add bootstrap table styles to pandoc tables
+function bootstrapStylePandocTables() {
+  $('tr.header').parent('thead').parent('table').addClass('table table-condensed');
+}
+$(document).ready(function () {
+  bootstrapStylePandocTables();
+});
+
+
+</script>
+
+<!-- dynamically load mathjax for compatibility with self-contained -->
+<script>
+  (function () {
+    var script = document.createElement("script");
+    script.type = "text/javascript";
+    script.src  = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
+    document.getElementsByTagName("head")[0].appendChild(script);
+  })();
+</script>
+
+</body>
+</html>
diff --git a/inst/doc/DESeq2.pdf b/inst/doc/DESeq2.pdf
deleted file mode 100644
index 40d0612..0000000
Binary files a/inst/doc/DESeq2.pdf and /dev/null differ
diff --git a/man/DESeq.Rd b/man/DESeq.Rd
index 16674f1..bc51dbf 100644
--- a/man/DESeq.Rd
+++ b/man/DESeq.Rd
@@ -27,8 +27,9 @@ See \code{\link{estimateDispersions}} for description.}
 \item{betaPrior}{whether or not to put a zero-mean normal prior on
 the non-intercept coefficients 
 See \code{\link{nbinomWaldTest}} for description of the calculation
-of the beta prior. By default, the beta prior is used only for the
-Wald test, but can also be specified for the likelihood ratio test.}
+of the beta prior. In versions \code{>=1.16}, the default is set
+to \code{FALSE}, and shrunken LFCs are obtained afterwards using
+\code{\link{lfcShrink}}.}
 
 \item{full}{for \code{test="LRT"}, the full model formula,
 which is restricted to the formula in \code{design(object)}.
@@ -161,18 +162,21 @@ dds <- DESeqDataSetFromMatrix(cnts, DataFrame(cond), ~ cond)
 dds <- DESeq(dds)
 res <- results(dds)
 
+# moderated log2 fold changes
+resultsNames(dds)
+resLFC <- lfcShrink(dds, coef=2, res=res)
+
 # an alternate analysis: likelihood ratio test
 ddsLRT <- DESeq(dds, test="LRT", reduced= ~ 1)
 resLRT <- results(ddsLRT)
 
 }
-\author{
-Michael Love
-}
 \references{
 Michael I Love, Wolfgang Huber, Simon Anders: Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2. Genome Biology 2014, 15:550. \url{http://dx.doi.org/10.1186/s13059-014-0550-8}
 }
 \seealso{
 \code{\link{nbinomWaldTest}}, \code{\link{nbinomLRT}}
 }
-
+\author{
+Michael Love
+}
diff --git a/man/DESeq2-package.Rd b/man/DESeq2-package.Rd
index 15be6f3..f7e63fc 100644
--- a/man/DESeq2-package.Rd
+++ b/man/DESeq2-package.Rd
@@ -15,9 +15,6 @@ For more detailed information on usage, see the package vignette, by typing
 of the vignette. All support questions should be posted to the Bioconductor
 support site: \url{http://support.bioconductor.org}.
 }
-\author{
-Michael Love, Wolfgang Huber, Simon Anders
-}
 \references{
 DESeq2 reference:
 
@@ -27,5 +24,7 @@ DESeq reference:
 
 Simon Anders, Wolfgang Huber: Differential expression analysis for sequence count data. Genome Biology 2010, 11:106. \url{http://dx.doi.org/10.1186/gb-2010-11-10-r106}
 }
+\author{
+Michael Love, Wolfgang Huber, Simon Anders
+}
 \keyword{package}
-
diff --git a/man/DESeqDataSet.Rd b/man/DESeqDataSet.Rd
index ce02af8..96241c9 100644
--- a/man/DESeqDataSet.Rd
+++ b/man/DESeqDataSet.Rd
@@ -2,10 +2,13 @@
 % Please edit documentation in R/AllClasses.R
 \docType{class}
 \name{DESeqDataSet-class}
+\alias{DESeqDataSet-class}
 \alias{DESeqDataSet}
 \alias{DESeqDataSet-class}
+\alias{DESeqDataSetFromMatrix}
 \alias{DESeqDataSetFromHTSeqCount}
 \alias{DESeqDataSetFromMatrix}
+\alias{DESeqDataSetFromHTSeqCount}
 \alias{DESeqDataSetFromTximport}
 \title{DESeqDataSet object and constructors}
 \usage{
@@ -95,4 +98,3 @@ dds <- DESeqDataSetFromMatrix(countData, DataFrame(condition), ~ condition)
 \references{
 See \url{http://www-huber.embl.de/users/anders/HTSeq} for htseq-count
 }
-
diff --git a/man/DESeqResults.Rd b/man/DESeqResults.Rd
index 20cfd2b..0fec033 100644
--- a/man/DESeqResults.Rd
+++ b/man/DESeqResults.Rd
@@ -2,6 +2,7 @@
 % Please edit documentation in R/AllClasses.R
 \docType{class}
 \name{DESeqResults-class}
+\alias{DESeqResults-class}
 \alias{DESeqResults}
 \alias{DESeqResults-class}
 \title{DESeqResults object and constructor}
@@ -22,4 +23,3 @@ to allow other packages to write methods for results
 objects from the DESeq2 package. It is used by \code{\link{results}}
 to wrap up the results table.
 }
-
diff --git a/man/DESeqTransform.Rd b/man/DESeqTransform.Rd
index eaa5cb4..896055b 100644
--- a/man/DESeqTransform.Rd
+++ b/man/DESeqTransform.Rd
@@ -2,6 +2,7 @@
 % Please edit documentation in R/AllClasses.R
 \docType{class}
 \name{DESeqTransform-class}
+\alias{DESeqTransform-class}
 \alias{DESeqTransform}
 \alias{DESeqTransform-class}
 \title{DESeqTransform object and constructor}
@@ -23,4 +24,3 @@ It is used by \code{\link{rlog}} and
 to wrap up the results into a class for downstream methods,
 such as \code{\link{plotPCA}}.
 }
-
diff --git a/man/coef.Rd b/man/coef.Rd
index bd219c2..e6df156 100644
--- a/man/coef.Rd
+++ b/man/coef.Rd
@@ -39,4 +39,3 @@ coef(dds, SE=TRUE)[1,]
 \author{
 Michael Love
 }
-
diff --git a/man/collapseReplicates.Rd b/man/collapseReplicates.Rd
index f1120d2..e842ce7 100644
--- a/man/collapseReplicates.Rd
+++ b/man/collapseReplicates.Rd
@@ -57,4 +57,3 @@ matchFirstLevel <- dds$sample == levels(dds$sample)[1]
 stopifnot(all(rowSums(counts(dds[,matchFirstLevel])) == counts(ddsColl[,1])))
 
 }
-
diff --git a/man/counts.Rd b/man/counts.Rd
index 61055fe..e504447 100644
--- a/man/counts.Rd
+++ b/man/counts.Rd
@@ -5,6 +5,7 @@
 \alias{counts}
 \alias{counts,DESeqDataSet-method}
 \alias{counts<-,DESeqDataSet,matrix-method}
+\alias{counts}
 \title{Accessors for the 'counts' slot of a DESeqDataSet object.}
 \usage{
 \S4method{counts}{DESeqDataSet}(object, normalized = FALSE,
@@ -39,10 +40,9 @@ dds <- estimateSizeFactors(dds) # run this or DESeq() first
 head(counts(dds, normalized=TRUE))
 
 }
-\author{
-Simon Anders
-}
 \seealso{
 \code{\link{sizeFactors}}, \code{\link{normalizationFactors}}
 }
-
+\author{
+Simon Anders
+}
diff --git a/man/design.Rd b/man/design.Rd
index dd7c02a..2e84d86 100644
--- a/man/design.Rd
+++ b/man/design.Rd
@@ -5,6 +5,7 @@
 \alias{design}
 \alias{design,DESeqDataSet-method}
 \alias{design<-,DESeqDataSet,formula-method}
+\alias{design}
 \title{Accessors for the 'design' slot of a DESeqDataSet object.}
 \usage{
 \S4method{design}{DESeqDataSet}(object)
@@ -28,4 +29,3 @@ dds <- makeExampleDESeqDataSet(m=4)
 design(dds) <- formula(~ 1)
 
 }
-
diff --git a/man/dispersionFunction.Rd b/man/dispersionFunction.Rd
index 119d0f9..9b72870 100644
--- a/man/dispersionFunction.Rd
+++ b/man/dispersionFunction.Rd
@@ -3,9 +3,11 @@
 \docType{methods}
 \name{dispersionFunction}
 \alias{dispersionFunction}
-\alias{dispersionFunction,DESeqDataSet-method}
 \alias{dispersionFunction<-}
+\alias{dispersionFunction}
+\alias{dispersionFunction,DESeqDataSet-method}
 \alias{dispersionFunction<-,DESeqDataSet,function-method}
+\alias{dispersionFunction}
 \title{Accessors for the 'dispersionFunction' slot of a DESeqDataSet object.}
 \usage{
 dispersionFunction(object, ...)
@@ -48,4 +50,3 @@ dispersionFunction(dds)
 \seealso{
 \code{\link{estimateDispersions}}
 }
-
diff --git a/man/dispersions.Rd b/man/dispersions.Rd
index 849c942..f50a2c6 100644
--- a/man/dispersions.Rd
+++ b/man/dispersions.Rd
@@ -3,9 +3,11 @@
 \docType{methods}
 \name{dispersions}
 \alias{dispersions}
-\alias{dispersions,DESeqDataSet-method}
 \alias{dispersions<-}
+\alias{dispersions}
+\alias{dispersions,DESeqDataSet-method}
 \alias{dispersions<-,DESeqDataSet,numeric-method}
+\alias{dispersions}
 \title{Accessor functions for the dispersion estimates in a DESeqDataSet
 object.}
 \usage{
@@ -28,10 +30,9 @@ dispersions(object, ...) <- value
 The dispersions for each row of the DESeqDataSet.  Generally,
 these are set by \code{\link{estimateDispersions}}.
 }
-\author{
-Simon Anders
-}
 \seealso{
 \code{\link{estimateDispersions}}
 }
-
+\author{
+Simon Anders
+}
diff --git a/man/estimateBetaPriorVar.Rd b/man/estimateBetaPriorVar.Rd
index 05c0797..491d2a4 100644
--- a/man/estimateBetaPriorVar.Rd
+++ b/man/estimateBetaPriorVar.Rd
@@ -3,6 +3,7 @@
 \name{estimateBetaPriorVar}
 \alias{estimateBetaPriorVar}
 \alias{estimateMLEForBetaPriorVar}
+\alias{estimateMLEForBetaPriorVar}
 \title{Steps for estimating the beta prior variance}
 \usage{
 estimateBetaPriorVar(object, betaPriorMethod = c("weighted", "quantile"),
@@ -43,4 +44,3 @@ NOTE: \code{estimateBetaPriorVar} returns a numeric vector, not a DESEqDataSet!
 For advanced users: to use these functions, first run \code{estimateMLEForBetaPriorVar}
 and then run \code{estimateBetaPriorVar}.
 }
-
diff --git a/man/estimateDispersions.Rd b/man/estimateDispersions.Rd
index fab24d2..2c87796 100644
--- a/man/estimateDispersions.Rd
+++ b/man/estimateDispersions.Rd
@@ -100,4 +100,3 @@ head(dispersions(dds))
   \item Wu, H., Wang, C. & Wu, Z. A new shrinkage estimator for dispersion improves differential expression detection in RNA-seq data. Biostatistics (2012). \url{http://dx.doi.org/10.1093/biostatistics/kxs033}
 }
 }
-
diff --git a/man/estimateDispersionsGeneEst.Rd b/man/estimateDispersionsGeneEst.Rd
index 8b72693..c61f7f4 100644
--- a/man/estimateDispersionsGeneEst.Rd
+++ b/man/estimateDispersionsGeneEst.Rd
@@ -1,8 +1,11 @@
 % Generated by roxygen2: do not edit by hand
 % Please edit documentation in R/core.R
 \name{estimateDispersionsGeneEst}
-\alias{estimateDispersionsFit}
 \alias{estimateDispersionsGeneEst}
+\alias{estimateDispersionsFit}
+\alias{estimateDispersionsMAP}
+\alias{estimateDispersionsPriorVar}
+\alias{estimateDispersionsFit}
 \alias{estimateDispersionsMAP}
 \alias{estimateDispersionsPriorVar}
 \title{Low-level functions to fit dispersion estimates}
@@ -100,4 +103,3 @@ dispPriorVar <- estimateDispersionsPriorVar(dds)
 \seealso{
 \code{\link{estimateDispersions}}
 }
-
diff --git a/man/estimateSizeFactors.Rd b/man/estimateSizeFactors.Rd
index 660aa4d..bf670e2 100644
--- a/man/estimateSizeFactors.Rd
+++ b/man/estimateSizeFactors.Rd
@@ -7,17 +7,23 @@
 \title{Estimate the size factors for a \code{\link{DESeqDataSet}}}
 \usage{
 \S4method{estimateSizeFactors}{DESeqDataSet}(object, type = c("ratio",
-  "iterate"), locfunc = stats::median, geoMeans, controlGenes, normMatrix)
+  "poscounts", "iterate"), locfunc = stats::median, geoMeans, controlGenes,
+  normMatrix)
 }
 \arguments{
 \item{object}{a DESeqDataSet}
 
-\item{type}{either "ratio" or "iterate". "ratio" uses the standard
-median ratio method introduced in DESeq. The size factor is the
-median ratio of the sample over a pseudosample: for each gene, the geometric mean
-of all samples. "iterate" offers an alternative estimator, which can be
-used even when all genes contain a sample with a zero. This estimator
-iterates between estimating the dispersion with a design of ~1, and
+\item{type}{Method for estimation: either "ratio", "poscounts", or "iterate".
+"ratio" uses the standard median ratio method introduced in DESeq. The size factor is the
+median ratio of the sample over a "pseudosample": for each gene, the geometric mean
+of all samples.
+"poscounts" and "iterate" offer alternative estimators, which can be
+used even when all genes contain a sample with a zero (a problem for the
+default method, as the geometric mean becomes zero, and the ratio undefined).
+The "poscounts" estimator deals with a gene with some zeros, by calculating a
+modified geometric mean by taking the n-th root of the product of the non-zero counts.
+This evolved out of use cases with Paul McMurdie's phyloseq package for metagenomic samples.
+The "iterate" estimator iterates between estimating the dispersion with a design of ~1, and
 finding a size factor vector by numerically optimizing the likelihood
 of the ~1 model.}
 
@@ -87,9 +93,6 @@ dds <- estimateSizeFactors(dds,geoMeans=geoMeans)
 sizeFactors(dds)
 
 }
-\author{
-Simon Anders
-}
 \references{
 Reference for the median ratio method:
 
@@ -98,4 +101,6 @@ Simon Anders, Wolfgang Huber: Differential expression analysis for sequence coun
 \seealso{
 \code{\link{estimateSizeFactorsForMatrix}}
 }
-
+\author{
+Simon Anders
+}
diff --git a/man/estimateSizeFactorsForMatrix.Rd b/man/estimateSizeFactorsForMatrix.Rd
index 14121e1..a308de3 100644
--- a/man/estimateSizeFactorsForMatrix.Rd
+++ b/man/estimateSizeFactorsForMatrix.Rd
@@ -42,10 +42,9 @@ geoMeans <- exp(rowMeans(log(counts(dds))))
 estimateSizeFactorsForMatrix(counts(dds),geoMeans=geoMeans)
 
 }
-\author{
-Simon Anders
-}
 \seealso{
 \code{\link{estimateSizeFactors}}
 }
-
+\author{
+Simon Anders
+}
diff --git a/man/fpkm.Rd b/man/fpkm.Rd
index b03ec96..273e34e 100644
--- a/man/fpkm.Rd
+++ b/man/fpkm.Rd
@@ -80,4 +80,3 @@ fpkm(dds)
 \seealso{
 \code{\link{fpm}}
 }
-
diff --git a/man/fpm.Rd b/man/fpm.Rd
index f1f73bb..6c66a28 100644
--- a/man/fpm.Rd
+++ b/man/fpm.Rd
@@ -59,4 +59,3 @@ colSums(fpm(dds, robust=FALSE))/1e6
 \seealso{
 \code{\link{fpkm}}
 }
-
diff --git a/man/lfcShrink.Rd b/man/lfcShrink.Rd
new file mode 100644
index 0000000..d61982f
--- /dev/null
+++ b/man/lfcShrink.Rd
@@ -0,0 +1,43 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/helper.R
+\name{lfcShrink}
+\alias{lfcShrink}
+\title{Shrink log2 fold changes}
+\usage{
+lfcShrink(dds, coef, contrast, res, type = "normal")
+}
+\arguments{
+\item{dds}{a DESeqDataSet object, which has been run through
+\code{\link{DESeq}}, or at the least, \code{\link{estimateDispersions}}}
+
+\item{coef}{the number of the coefficient (LFC) to shrink,
+consult \code{resultsNames(dds)} after running \code{DESeq(dds, betaPrior=FALSE)}.
+only \code{coef} or \code{contrast} can be specified, not both}
+
+\item{contrast}{see argument description in \code{\link{results}}.
+only \code{coef} or \code{contrast} can be specified, not both}
+
+\item{res}{a DESeqResults object (can be missing)}
+
+\item{type}{at this time this argument is ignored, because only one
+shrinkage estimator is implemented, but more are to come!}
+}
+\value{
+if \code{res} is not missing, a DESeqResults object with
+the \code{log2FoldChange} column replaced with a shrunken LFC.
+If \code{res} is missing, just the shrunken LFC vector.
+}
+\description{
+This function adds shrunken log2 fold changes (LFC) to a
+results table which was run without LFC moderation.
+Note: this function is still being prototyped.
+}
+\examples{
+
+ dds <- makeExampleDESeqDataSet(betaSD=1)
+ dds <- DESeq(dds, betaPrior=FALSE)
+ res <- results(dds)
+ res.shr <- lfcShrink(dds=dds, coef=2, res=res)
+ res.shr <- lfcShrink(dds=dds, contrast=c("condition","B","A"), res=res)
+
+}
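
Following on from the examples above, the shrunken and unshrunken estimates can be compared directly (a sketch using the same example objects, not part of the patch):

head(cbind(MLE = res$log2FoldChange, MAP = res.shr$log2FoldChange))
plotMA(res.shr)  # MA-plot of the shrunken log2 fold changes
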
diff --git a/man/makeExampleDESeqDataSet.Rd b/man/makeExampleDESeqDataSet.Rd
index 1b8c1d5..cba35cb 100644
--- a/man/makeExampleDESeqDataSet.Rd
+++ b/man/makeExampleDESeqDataSet.Rd
@@ -40,4 +40,3 @@ dds <- makeExampleDESeqDataSet()
 dds
 
 }
-
diff --git a/man/nbinomLRT.Rd b/man/nbinomLRT.Rd
index 5124633..66f6a2b 100644
--- a/man/nbinomLRT.Rd
+++ b/man/nbinomLRT.Rd
@@ -4,9 +4,8 @@
 \alias{nbinomLRT}
 \title{Likelihood ratio test (chi-squared test) for GLMs}
 \usage{
-nbinomLRT(object, full = design(object), reduced, betaPrior = FALSE,
-  betaPriorVar, maxit = 100, useOptim = TRUE, quiet = FALSE,
-  useQR = TRUE)
+nbinomLRT(object, full = design(object), reduced, betaTol = 1e-08,
+  maxit = 100, useOptim = TRUE, quiet = FALSE, useQR = TRUE)
 }
 \arguments{
 \item{object}{a DESeqDataSet}
@@ -19,16 +18,7 @@ alternatively, can be a matrix}
 the full model with a term or terms of interest removed.
 alternatively, can be a matrix}
 
-\item{betaPrior}{whether or not to put a zero-mean normal prior on
-the non-intercept coefficients 
-While the beta prior is used typically, for the Wald test, it can
-also be specified for the likelihood ratio test. For more details
-on the calculation, see \code{\link{nbinomWaldTest}}.}
-
-\item{betaPriorVar}{a vector with length equal to the number of
-model terms including the intercept.
-which if missing is estimated from the rows which do not have any
-zeros}
+\item{betaTol}{control parameter defining convergence: iteration stops
+when the relative change in deviance falls below this value}
 
 \item{maxit}{the maximum number of iterations to allow for convergence of the
 coefficient vector}
@@ -70,4 +60,3 @@ res <- results(dds)
 \seealso{
 \code{\link{DESeq}}, \code{\link{nbinomWaldTest}}
 }
-
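
As a concrete instance of the full/reduced pairing described above (illustrative only, not part of the patch):

dds <- makeExampleDESeqDataSet(n=100, m=6)
dds <- estimateSizeFactors(dds)
dds <- estimateDispersions(dds)
# does 'condition' improve the fit over an intercept-only model?
dds <- nbinomLRT(dds, full = ~ condition, reduced = ~ 1)
res <- results(dds)
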
diff --git a/man/nbinomWaldTest.Rd b/man/nbinomWaldTest.Rd
index 4829d18..477237e 100644
--- a/man/nbinomWaldTest.Rd
+++ b/man/nbinomWaldTest.Rd
@@ -4,9 +4,9 @@
 \alias{nbinomWaldTest}
 \title{Wald test for the GLM coefficients}
 \usage{
-nbinomWaldTest(object, betaPrior, betaPriorVar, modelMatrix = NULL,
-  modelMatrixType, maxit = 100, useOptim = TRUE, quiet = FALSE,
-  useT = FALSE, df, useQR = TRUE)
+nbinomWaldTest(object, betaPrior = FALSE, betaPriorVar, modelMatrix = NULL,
+  modelMatrixType, betaTol = 1e-08, maxit = 100, useOptim = TRUE,
+  quiet = FALSE, useT = FALSE, df, useQR = TRUE)
 }
 \arguments{
 \item{object}{a DESeqDataSet}
@@ -30,6 +30,8 @@ level of factors in addition to an intercept.
 betaPrior must be set to TRUE in order for expanded model matrices
 to be fit.}
 
+\item{betaTol}{control parameter defining convergence: iteration stops
+when the relative change in deviance falls below this value}
+
 \item{maxit}{the maximum number of iterations to allow for convergence of the
 coefficient vector}
 
@@ -61,18 +63,37 @@ and dispersion estimates.  See \code{\link{DESeq}} for the GLM formula.
 \details{
 The fitting proceeds as follows: standard maximum likelihood estimates
 for GLM coefficients (synonymous with "beta", "log2 fold change", "effect size")
-are calculated. A zero-centered Normal prior distribution 
-is assumed for the coefficients other than the intercept.
+are calculated.
+Then, optionally, a zero-centered Normal prior distribution 
+(\code{betaPrior}) is assumed for the coefficients other than the intercept.
+
+Note that this posterior log2 fold change
+estimation is no longer the default setting for \code{nbinomWaldTest},
+as the standard workflow for coefficient shrinkage has moved to
+an additional function, \code{\link{lfcShrink}}.
+
+For calculating Wald test p-values, the coefficients are scaled by their
+standard errors and then compared to a standard Normal distribution. 
+The \code{\link{results}}
+function without any arguments will automatically perform a contrast of the
+last level of the last variable in the design formula over the first level.
+The \code{contrast} argument of the \code{\link{results}} function can be used
+to generate other comparisons.
+ 
+The Wald test can be replaced with the \code{\link{nbinomLRT}}
+for an alternative test of significance.
+
+Notes on the log2 fold change prior:
+
 The variance of the prior distribution for each
 non-intercept coefficient is calculated using the observed
 distribution of the maximum likelihood coefficients.  
 The final coefficients are then maximum a posteriori estimates
-using this prior (Tikhonov/ridge regularization). See below for details on the
+using this prior (Tikhonov/ridge regularization). 
+See below for details on the
 prior variance and the Methods section of the DESeq2 manuscript for more detail.
 The use of a prior has little effect on genes with high counts and helps to
 moderate the large spread in coefficients for genes with low counts.
-For calculating Wald test p-values, the coefficients are scaled by their
-standard errors and then compared to a standard Normal distribution.
 
 The prior variance is calculated by matching the 0.05 upper quantile
 of the observed MLE coefficients to a zero-centered Normal distribution.
@@ -86,11 +107,7 @@ that noisy estimates of log fold changes from small count genes do not
 overly influence the calculation of the prior variance.
 See \code{\link{estimateBetaPriorVar}}.
 The final prior variance for a factor level is the average of the
-estimated prior variance over all contrasts of all levels of the factor. 
-Another change since the 2014 paper: when interaction terms are present
-in the design, the prior on log fold changes is turned off
-(for more details, see the vignette section, "Methods changes since
-the 2014 DESeq2 paper").
+estimated prior variance over all contrasts of all levels of the factor.
 
 When a log2 fold change prior is used (betaPrior=TRUE),
 then \code{nbinomWaldTest} will by default use expanded model matrices,
@@ -99,14 +116,7 @@ is used to override the default behavior.
 This ensures that log2 fold changes will be independent of the choice
 of reference level. In this case, the beta prior variance for each factor
 is calculated as the average of the mean squared maximum likelihood
-estimates for each level and every possible contrast. The \code{\link{results}}
-function without any arguments will automatically perform a contrast of the
-last level of the last variable in the design formula over the first level.
-The \code{contrast} argument of the \code{\link{results}} function can be used
-to generate other comparisons.
- 
-The Wald test can be replaced with the \code{\link{nbinomLRT}}
-for an alternative test of significance.
+estimates for each level and every possible contrast.
 }
 \examples{
 
@@ -120,4 +130,3 @@ res <- results(dds)
 \seealso{
 \code{\link{DESeq}}, \code{\link{nbinomLRT}}
 }
-
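
The scaling described in the details above (coefficients divided by their standard errors, compared to a standard Normal) is, in R terms (a sketch, not part of the patch):

stat <- res$log2FoldChange / res$lfcSE  # Wald statistic
pval <- 2 * pnorm(-abs(stat))           # two-sided p-value
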
diff --git a/man/normTransform.Rd b/man/normTransform.Rd
index 8e832a5..6fc7094 100644
--- a/man/normTransform.Rd
+++ b/man/normTransform.Rd
@@ -20,4 +20,3 @@ object after applying: f(count + pc).
 \seealso{
 \code{\link{varianceStabilizingTransformation}}, \code{\link{rlog}}
 }
-
diff --git a/man/normalizationFactors.Rd b/man/normalizationFactors.Rd
index 6574a75..af6abbf 100644
--- a/man/normalizationFactors.Rd
+++ b/man/normalizationFactors.Rd
@@ -3,9 +3,11 @@
 \docType{methods}
 \name{normalizationFactors}
 \alias{normalizationFactors}
-\alias{normalizationFactors,DESeqDataSet-method}
 \alias{normalizationFactors<-}
+\alias{normalizationFactors,DESeqDataSet-method}
 \alias{normalizationFactors<-,DESeqDataSet,matrix-method}
 \title{Accessor functions for the normalization factors in a DESeqDataSet
 object.}
 \usage{
@@ -61,4 +63,3 @@ normalizationFactors(dds) <- normFactors
 dds <- DESeq(dds)
 
 }
-
diff --git a/man/normalizeGeneLength.Rd b/man/normalizeGeneLength.Rd
index 45bea6f..d935e23 100644
--- a/man/normalizeGeneLength.Rd
+++ b/man/normalizeGeneLength.Rd
@@ -16,4 +16,3 @@ Normalize for gene length using the output of transcript abundance estimators
 This function is deprecated and moved to a new general purpose package,
 tximport, which will be added to Bioconductor.
 }
-
diff --git a/man/plotCounts.Rd b/man/plotCounts.Rd
index 6be972a..3e6d78f 100644
--- a/man/plotCounts.Rd
+++ b/man/plotCounts.Rd
@@ -2,11 +2,11 @@
 % Please edit documentation in R/plots.R
 \name{plotCounts}
 \alias{plotCounts}
-\title{Plot of normalized counts for a single gene on log scale}
+\title{Plot of normalized counts for a single gene}
 \usage{
 plotCounts(dds, gene, intgroup = "condition", normalized = TRUE,
-  transform = FALSE, main, xlab = "group", returnData = FALSE,
-  replaced = FALSE, ...)
+  transform = TRUE, main, xlab = "group", returnData = FALSE,
+  replaced = FALSE, pc, ...)
 }
 \arguments{
 \item{dds}{a \code{DESeqDataSet}}
@@ -18,8 +18,8 @@ plotCounts(dds, gene, intgroup = "condition", normalized = TRUE,
 \item{normalized}{whether the counts should be normalized by size factor
 (default is TRUE)}
 
-\item{transform}{whether to present log2 counts (TRUE) or
-to present the counts on the log scale (FALSE, default)}
+\item{transform}{whether to use a log scale for the y-axis (default
+is TRUE)}
 
 \item{main}{as in 'plot'}
 
@@ -30,10 +30,12 @@ covariates for custom plotting (default is FALSE)}
 
 \item{replaced}{use the outlier-replaced counts if they exist}
 
+\item{pc}{pseudocount for log transform}
+
 \item{...}{arguments passed to plot}
 }
 \description{
-Note: normalized counts plus a pseudocount of 0.5 are shown.
+Normalized counts plus a pseudocount of 0.5 are shown by default.
 }
 \examples{
 
@@ -41,4 +43,3 @@ dds <- makeExampleDESeqDataSet()
 plotCounts(dds, "gene1")
 
 }
-
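
The \code{returnData} argument documented above supports custom plotting; a brief sketch (not part of the patch):

d <- plotCounts(dds, "gene1", intgroup="condition", returnData=TRUE)
head(d)  # data.frame with a 'count' column plus the intgroup covariates
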
diff --git a/man/plotDispEsts.Rd b/man/plotDispEsts.Rd
index cce5e1c..c64de9b 100644
--- a/man/plotDispEsts.Rd
+++ b/man/plotDispEsts.Rd
@@ -6,9 +6,9 @@
 \alias{plotDispEsts,DESeqDataSet-method}
 \title{Plot dispersion estimates}
 \usage{
-\S4method{plotDispEsts}{DESeqDataSet}(object, ymin, genecol = "black",
-  fitcol = "red", finalcol = "dodgerblue", legend = TRUE, xlab, ylab,
-  log = "xy", cex = 0.45, ...)
+\S4method{plotDispEsts}{DESeqDataSet}(object, ymin, CV = FALSE,
+  genecol = "black", fitcol = "red", finalcol = "dodgerblue",
+  legend = TRUE, xlab, ylab, log = "xy", cex = 0.45, ...)
 }
 \arguments{
 \item{object}{a DESeqDataSet, with dispersions estimated}
@@ -16,6 +16,12 @@
 \item{ymin}{the lower bound for points on the plot, points beyond this
 are drawn as triangles at ymin}
 
+\item{CV}{logical, whether to plot the asymptotic or biological
+coefficient of variation (the square root of dispersion) on the y-axis.
+As the mean grows to infinity, the square root of dispersion gives
+the coefficient of variation for the counts. Default is \code{FALSE},
+plotting dispersion.}
+
 \item{genecol}{the color for gene-wise dispersion estimates}
 
 \item{fitcol}{the color of the fitted estimates}
@@ -49,4 +55,3 @@ plotDispEsts(dds)
 \author{
 Simon Anders
 }
-
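
The claim in the \code{CV} description (square root of dispersion as the asymptotic coefficient of variation) follows from the NB variance function Var = mu + alpha * mu^2, giving CV^2 = 1/mu + alpha, which tends to alpha as the mean grows. A quick numeric check (illustrative, not part of the patch):

alpha <- 0.05
mu <- c(10, 100, 1000, 1e5)
cbind(mu = mu, cv = sqrt(1/mu + alpha), sqrt_alpha = sqrt(alpha))
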
diff --git a/man/plotMA.Rd b/man/plotMA.Rd
index a642e6d..a0961fd 100644
--- a/man/plotMA.Rd
+++ b/man/plotMA.Rd
@@ -5,6 +5,7 @@
 \alias{plotMA}
 \alias{plotMA,DESeqDataSet-method}
 \alias{plotMA,DESeqResults-method}
 \title{MA-plot from base means and log fold changes}
 \usage{
 \S4method{plotMA}{DESeqDataSet}(object, alpha = 0.1, main = "",
@@ -26,10 +27,12 @@ individual functions \code{\link{nbinomWaldTest}} or \code{\link{nbinomLRT}}}
 
 \item{ylim}{optional y limits}
 
-\item{MLE}{whether to plot the MLE (unshrunken estimates), defaults to FALSE.
+\item{MLE}{if \code{betaPrior=TRUE} was used,
+whether to plot the MLE (unshrunken estimates), defaults to FALSE.
 Requires that \code{\link{results}} was run with \code{addMLE=TRUE}.
-Note that the MLE will be plotted regardless of this argument, if DESeq() was run
-with \code{betaPrior=FALSE}.}
+Note that the MLE will be plotted regardless of this argument,
+if DESeq() was run with \code{betaPrior=FALSE}. See \code{\link{lfcShrink}}
+for examples on how to plot shrunken log2 fold changes.}
 
 \item{...}{further arguments passed to \code{plotMA} if object
 is \code{DESeqResults} or to \code{\link{results}} if object is
@@ -62,4 +65,3 @@ plotMA(res)
 \author{
 Michael Love
 }
-
diff --git a/man/plotPCA.Rd b/man/plotPCA.Rd
index c69592f..fd69c03 100644
--- a/man/plotPCA.Rd
+++ b/man/plotPCA.Rd
@@ -57,4 +57,3 @@ plotPCA( DESeqTransform( se ) )
 \author{
 Wolfgang Huber
 }
-
diff --git a/man/plotSparsity.Rd b/man/plotSparsity.Rd
index b537b2f..60e9dbd 100644
--- a/man/plotSparsity.Rd
+++ b/man/plotSparsity.Rd
@@ -27,4 +27,3 @@ dds <- estimateSizeFactors(dds)
 plotSparsity(dds)
 
 }
-
diff --git a/man/replaceOutliers.Rd b/man/replaceOutliers.Rd
index 7370459..97587a5 100644
--- a/man/replaceOutliers.Rd
+++ b/man/replaceOutliers.Rd
@@ -3,6 +3,7 @@
 \name{replaceOutliers}
 \alias{replaceOutliers}
 \alias{replaceOutliersWithTrimmedMean}
 \title{Replace outliers with trimmed mean}
 \usage{
 replaceOutliers(object, trim = 0.2, cooksCutoff, minReplicates = 7,
@@ -69,4 +70,3 @@ sections of the vignette: 'Dealing with count outliers' and 'Count outlier detec
 \seealso{
 \code{\link{DESeq}}
 }
-
diff --git a/man/results.Rd b/man/results.Rd
index a973dd8..8a61ed5 100644
--- a/man/results.Rd
+++ b/man/results.Rd
@@ -1,9 +1,11 @@
 % Generated by roxygen2: do not edit by hand
 % Please edit documentation in R/results.R
 \name{results}
-\alias{removeResults}
 \alias{results}
 \alias{resultsNames}
+\alias{removeResults}
 \title{Extract results from a DESeq analysis}
 \usage{
 results(object, contrast, name, lfcThreshold = 0,
@@ -119,10 +121,13 @@ the range of each gene will be returned}
 the one exception is after \code{nbinomLRT} has been run, \code{test="Wald"}
 will generate Wald statistics and Wald test p-values.}
 
-\item{addMLE}{whether the "unshrunken" maximum likelihood estimates (MLE)
+\item{addMLE}{if \code{betaPrior=TRUE} was used,
+whether the "unshrunken" maximum likelihood estimates (MLE)
 of log2 fold change should be added as a column to the results table (default is FALSE).
-only applicable when a beta prior was used during the model fitting. only implemented
-for 'contrast' for three element character vectors or 'name' for interactions.}
+This argument is preserved for backward compatibility, as the
+recommended pipeline is now to generate shrunken MAP estimates using \code{\link{lfcShrink}}.
+This argument functionality is only implemented for \code{contrast}
+specified as three element character vectors.}
 
 \item{tidy}{whether to output the results table with rownames as a first column 'row'.
 the table will also be coerced to \code{data.frame}}
@@ -319,4 +324,3 @@ PNAS (2010), \url{http://dx.doi.org/10.1073/pnas.0914005107}
 \seealso{
 \code{\link{DESeq}}, \code{\link[genefilter]{filtered_R}}
 }
-
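
The three-element \code{contrast} form that \code{addMLE} supports looks like the following (assumes \code{dds} was fit with \code{betaPrior=TRUE}; a sketch, not part of the patch):

res <- results(dds, contrast=c("condition","B","A"), addMLE=TRUE)
head(res$lfcMLE)  # unshrunken estimates alongside the shrunken log2FoldChange
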
diff --git a/man/rlog.Rd b/man/rlog.Rd
index 8bb50a8..0f7ea20 100644
--- a/man/rlog.Rd
+++ b/man/rlog.Rd
@@ -3,6 +3,7 @@
 \name{rlog}
 \alias{rlog}
 \alias{rlogTransformation}
 \title{Apply a 'regularized log' transformation}
 \usage{
 rlog(object, blind = TRUE, intercept, betaPriorVar, fitType = "parametric")
@@ -133,4 +134,3 @@ Michael I Love, Wolfgang Huber, Simon Anders: Moderated estimation of fold chang
 \seealso{
 \code{\link{plotPCA}}, \code{\link{varianceStabilizingTransformation}}, \code{\link{normTransform}}
 }
-
diff --git a/man/show.Rd b/man/show.Rd
index 12dd77d..8ef80f2 100644
--- a/man/show.Rd
+++ b/man/show.Rd
@@ -20,4 +20,3 @@ standard method.
 \author{
 Michael Love
 }
-
diff --git a/man/sizeFactors.Rd b/man/sizeFactors.Rd
index 3c81eb2..4058b32 100644
--- a/man/sizeFactors.Rd
+++ b/man/sizeFactors.Rd
@@ -5,6 +5,7 @@
 \alias{sizeFactors}
 \alias{sizeFactors,DESeqDataSet-method}
 \alias{sizeFactors<-,DESeqDataSet,numeric-method}
 \title{Accessor functions for the 'sizeFactors' information in a DESeqDataSet
 object.}
 \usage{
@@ -26,10 +27,9 @@ scale by dividing by the corresponding size factor (as performed by
 See \code{\link{DESeq}} for a description of the use of size factors. If gene-specific normalization
 is desired for each sample, use \code{\link{normalizationFactors}}.
 }
-\author{
-Simon Anders
-}
 \seealso{
 \code{\link{estimateSizeFactors}}
 }
-
+\author{
+Simon Anders
+}
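
The phrase "scale by dividing by the corresponding size factor" above is exactly a column-wise division (a sketch, not part of the patch, for any DESeqDataSet \code{dds}):

dds <- estimateSizeFactors(dds)
all.equal(counts(dds, normalized=TRUE),
          t(t(counts(dds)) / sizeFactors(dds)))
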
diff --git a/man/summary.Rd b/man/summary.Rd
index 8162b35..32c9706 100644
--- a/man/summary.Rd
+++ b/man/summary.Rd
@@ -11,10 +11,11 @@
 \arguments{
 \item{object}{a \code{\link{DESeqResults}} object}
 
-\item{alpha}{the adjusted p-value cutoff. if not set, this
+\item{alpha}{the adjusted p-value cutoff. If not set, this
 defaults to the \code{alpha} argument which was used in
 \code{\link{results}} to set the target FDR for independent
-filtering.}
+filtering, or if independent filtering was not performed,
+to 0.1.}
 
 \item{...}{additional arguments}
 }
@@ -32,4 +33,3 @@ summary(res)
 \author{
 Michael Love
 }
-
diff --git a/man/unmix.Rd b/man/unmix.Rd
new file mode 100644
index 0000000..285bcae
--- /dev/null
+++ b/man/unmix.Rd
@@ -0,0 +1,43 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/helper.R
+\name{unmix}
+\alias{unmix}
+\title{Unmix samples using loss in a variance stabilized space}
+\usage{
+unmix(x, pure, alpha, shift, loss = 1, quiet = FALSE)
+}
+\arguments{
+\item{x}{normalized counts or TPMs of the samples to be unmixed}
+
+\item{pure}{normalized counts or TPMs of the "pure" samples}
+
+\item{alpha}{for normalized counts, the dispersion of the data
+when a negative binomial model is fit. this can be found by examining
+the asymptotic value of \code{dispersionFunction(dds)}, when using
+\code{fitType="parametric"} or the mean value when using
+\code{fitType="mean"}.}
+
+\item{shift}{for TPMs, the shift which approximately stabilizes the variance
+of log shifted TPMs. Can be assessed with \code{vsn::meanSdPlot}.}
+
+\item{loss}{either 1 (for L1) or 2 (for squared) loss function.
+Default is 1.}
+
+\item{quiet}{suppress the progress bar. Default is FALSE: a progress
+bar is shown if pbapply is installed.}
+}
+\value{
+mixture components for each sample (rows), which sum to 1.
+}
+\description{
+Unmixes samples in \code{x} according to \code{pure} components,
+using numerical optimization. The components in \code{pure}
+are added on the scale of gene expression (either normalized counts, or TPMs).
+The loss function when comparing fitted expression to the
+samples in \code{x} occurs in a variance stabilized space.
+This task is sometimes referred to as "deconvolution",
+and can be used, for example, to identify contributions from
+various tissues.
+Note: if the \code{pbapply} package is installed, a progress bar
+will be displayed while the mixture components are fit.
+}
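
A minimal call sketch for \code{unmix}; the objects \code{normCounts} and \code{pureCounts} are hypothetical placeholders, not part of the patch:

# rows are genes; columns of 'pure' are the pure components (e.g. tissues)
mix <- unmix(x = normCounts, pure = pureCounts, alpha = 0.05)
rowSums(mix)  # per-sample mixture proportions, summing to 1
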
diff --git a/man/varianceStabilizingTransformation.Rd b/man/varianceStabilizingTransformation.Rd
index d8610cf..e5bd867 100644
--- a/man/varianceStabilizingTransformation.Rd
+++ b/man/varianceStabilizingTransformation.Rd
@@ -1,8 +1,9 @@
 % Generated by roxygen2: do not edit by hand
 % Please edit documentation in R/vst.R
 \name{varianceStabilizingTransformation}
-\alias{getVarianceStabilizedData}
 \alias{varianceStabilizingTransformation}
+\alias{getVarianceStabilizedData}
 \title{Apply a variance stabilizing transformation (VST) to the count data}
 \usage{
 varianceStabilizingTransformation(object, blind = TRUE,
@@ -127,9 +128,6 @@ dispersionFunction(ddsNew) <- dispersionFunction(dds)
 vsdNew <- varianceStabilizingTransformation(ddsNew, blind=FALSE)
 
 }
-\author{
-Simon Anders
-}
 \references{
 Reference for the variance stabilizing transformation for counts with a dispersion trend:
 
@@ -138,4 +136,6 @@ Simon Anders, Wolfgang Huber: Differential expression analysis for sequence coun
 \seealso{
 \code{\link{plotPCA}}, \code{\link{rlog}}, \code{\link{normTransform}}
 }
-
+\author{
+Simon Anders
+}
diff --git a/man/vst.Rd b/man/vst.Rd
index 1a98fd2..c4aae78 100644
--- a/man/vst.Rd
+++ b/man/vst.Rd
@@ -40,4 +40,3 @@ dds <- makeExampleDESeqDataSet(n=20000, m=20)
 vsd <- vst(dds)
 
 }
-
diff --git a/src/DESeq2.cpp b/src/DESeq2.cpp
index 8fc4508..5bf6910 100644
--- a/src/DESeq2.cpp
+++ b/src/DESeq2.cpp
@@ -2,7 +2,7 @@
  * DESeq2 C++ functions
  * 
  * Author: Michael I. Love
- * Last modified: September 30, 2015
+ * Last modified: February 22, 2017
  * License: LGPL (>= 3)
  *
  * Note: The canonical, up-to-date DESeq2.cpp lives in 
@@ -27,7 +27,7 @@ using namespace Rcpp;
 // this function returns the log posterior of dispersion parameter alpha, for negative binomial variables
 // given the counts y, the expected means mu, the design matrix x (used for calculating the Cox-Reid adjustment),
 // and the parameters for the normal prior on log alpha
-double log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool use_prior) {
+double log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool usePrior, Rcpp::NumericMatrix::Row weights, bool useWeights) {
   double prior_part;
   double alpha = exp(log_alpha);
   Rcpp::NumericVector w_diag = pow(pow(mu, -1) + alpha, -1);
@@ -35,8 +35,13 @@ double log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numeric
   arma::mat b = x.t() * w * x;
   double cr_term = -0.5 * log(det(b));
   double alpha_neg1 = R_pow_di(alpha, -1);
-  double ll_part = sum(lgamma(y + alpha_neg1) - Rf_lgammafn(alpha_neg1) - y * log(mu + alpha_neg1) - alpha_neg1 * log(1.0 + mu * alpha));
-  if (use_prior) {
+  double ll_part;
+  if (useWeights) {
+    ll_part = sum(weights * (lgamma(y + alpha_neg1) - Rf_lgammafn(alpha_neg1) - y * log(mu + alpha_neg1) - alpha_neg1 * log(1.0 + mu * alpha)));
+  } else {
+    ll_part = sum(lgamma(y + alpha_neg1) - Rf_lgammafn(alpha_neg1) - y * log(mu + alpha_neg1) - alpha_neg1 * log(1.0 + mu * alpha));
+  }
+  if (usePrior) {
     prior_part = -0.5 * R_pow_di(log_alpha - log_alpha_prior_mean,2)/log_alpha_prior_sigmasq;
   } else {
     prior_part = 0.0;
@@ -47,7 +52,7 @@ double log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numeric
 
 // this function returns the derivative of the log posterior with respect to the log of the 
 // dispersion parameter alpha, given the same inputs as the previous function
-double dlog_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool use_prior) {
+double dlog_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool usePrior, Rcpp::NumericMatrix::Row weights, bool useWeights) {
   double prior_part;
   double alpha = exp(log_alpha);
   Rcpp::NumericVector w_diag = pow(pow(mu, -1) + alpha, -1);
@@ -60,9 +65,14 @@ double dlog_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numeri
   double cr_term = -0.5 * ddetb / det(b);
   double alpha_neg1 = R_pow_di(alpha, -1);
   double alpha_neg2 = R_pow_di(alpha, -2);
-  double ll_part = alpha_neg2 * sum(Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1.0 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1));
+  double ll_part;
+  if (useWeights) {
+    ll_part = alpha_neg2 * sum(weights * (Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1.0 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1)));
+  } else {
+    ll_part = alpha_neg2 * sum(Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1.0 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1));
+  }
   // only the prior part is w.r.t log alpha
-  if (use_prior) {
+  if (usePrior) {
     prior_part = -1.0 * (log_alpha - log_alpha_prior_mean)/log_alpha_prior_sigmasq;
   } else {
     prior_part = 0.0;
@@ -74,7 +84,7 @@ double dlog_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numeri
 
 // this function returns the second derivative of the log posterior with respect to the log of the 
 // dispersion parameter alpha, given the same inputs as the previous function
-double d2log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool use_prior) {
+double d2log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::NumericMatrix::Row mu, arma::mat x, double log_alpha_prior_mean, double log_alpha_prior_sigmasq, bool usePrior, Rcpp::NumericMatrix::Row weights, bool useWeights) {
   double prior_part;
   double alpha = exp(log_alpha);
   Rcpp::NumericVector w_diag = pow(pow(mu, -1) + alpha, -1);
@@ -92,9 +102,14 @@ double d2log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numer
   double cr_term = 0.5 * R_pow_di(ddetb/det(b), 2) - 0.5 * d2detb / det(b); 
   double alpha_neg1 = R_pow_di(alpha, -1);
   double alpha_neg2 = R_pow_di(alpha, -2);
-  double ll_part = -2 * R_pow_di(alpha, -3) * sum(Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1)) + alpha_neg2 * sum(-1 * alpha_neg2 * Rf_trigamma(alpha_neg1) + pow(mu, 2) * alpha * pow(1 + mu*alpha, -2) + alpha_neg2 * trigamma(y + alpha_neg1) + alpha_neg2 * y * pow(mu + alpha_neg1, -2));
+  double ll_part;
+  if (useWeights) {
+    ll_part = -2 * R_pow_di(alpha, -3) * sum(weights * (Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1))) + alpha_neg2 * sum(weights * (-1 * alpha_neg2 * Rf_trigamma(alpha_neg1) + pow(mu, 2) * alpha * pow(1 + mu*alpha, -2) + alpha_neg2 * trigamma(y + alpha_neg1) + alpha_neg2 * y * pow(mu + alpha_neg1, -2)));
+  } else {
+    ll_part = -2 * R_pow_di(alpha, -3) * sum(Rf_digamma(alpha_neg1) + log(1 + mu*alpha) - mu*alpha*pow(1 + mu*alpha, -1) - digamma(y + alpha_neg1) + y * pow(mu + alpha_neg1, -1)) + alpha_neg2 * sum(-1 * alpha_neg2 * Rf_trigamma(alpha_neg1) + pow(mu, 2) * alpha * pow(1 + mu*alpha, -2) + alpha_neg2 * trigamma(y + alpha_neg1) + alpha_neg2 * y * pow(mu + alpha_neg1, -2));
+  }
   // only the prior part is w.r.t log alpha
-  if (use_prior) {
+  if (usePrior) {
     prior_part = -1.0/log_alpha_prior_sigmasq; 
   } else {
     prior_part = 0.0;
@@ -102,7 +117,7 @@ double d2log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numer
   // Note: return (d2log_post/dalpha2 * alpha^2 + dlog_post/dalpha * alpha) 
   //            = (d2log_post/dalpha2 * alpha^2 + dlog_post/dlogalpha)
   // because we take derivatives w.r.t log alpha
-  double res = ((ll_part + cr_term) * R_pow_di(alpha, 2) + dlog_posterior(log_alpha, y, mu, x, log_alpha_prior_mean, log_alpha_prior_sigmasq, false)) + prior_part;
+  double res = ((ll_part + cr_term) * R_pow_di(alpha, 2) + dlog_posterior(log_alpha, y, mu, x, log_alpha_prior_mean, log_alpha_prior_sigmasq, false, weights, useWeights)) + prior_part;
   return(res);
 }
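
The comment near the return statement above ("because we take derivatives w.r.t log alpha") compresses a chain-rule identity; spelled out, with $f$ the log posterior and $\theta = \log\alpha$ (a clarifying note, not part of the patch):

$$\frac{d^2 f}{d\theta^2} = \alpha^2 \frac{d^2 f}{d\alpha^2} + \alpha \frac{df}{d\alpha},$$

which is why \code{d2log_posterior} adds the first-derivative term, computed by \code{dlog_posterior} with the prior switched off.
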
 
@@ -110,7 +125,7 @@ double d2log_posterior(double log_alpha, Rcpp::NumericMatrix::Row y, Rcpp::Numer
 // fitting occurs on the scale of log(alpha)
 //
 // [[Rcpp::export]]
-Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP min_log_alphaSEXP, SEXP kappa_0SEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP use_priorSEXP) {
+Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP min_log_alphaSEXP, SEXP kappa_0SEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP usePriorSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP) {
   Rcpp::NumericMatrix y(ySEXP);
   arma::mat x = Rcpp::as<arma::mat>(xSEXP);
   int y_n = y.nrow();
@@ -133,7 +148,10 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
   Rcpp::IntegerVector iter(y_n);
   Rcpp::IntegerVector iter_accept(y_n);
   double tol = Rcpp::as<double>(tolSEXP);
-  bool use_prior = Rcpp::as<bool>(use_priorSEXP);
+  bool usePrior = Rcpp::as<bool>(usePriorSEXP);
+  // observation weights
+  Rcpp::NumericMatrix weights(weightsSEXP);
+  bool useWeights = Rcpp::as<bool>(useWeightsSEXP);
 
   for (int i = 0; i < y_n; i++) {
     Rcpp::checkUserInterrupt();
@@ -146,8 +164,8 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
     // we use a line search based on the Armijo rule.
     // define a function theta(kappa) = f(a + kappa * d), where d is the search direction.
     // in this case the search direction is taken by the first derivative of the log likelihood
-    lp = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
-    dlp = dlog_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+    lp = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
+    dlp = dlog_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
     kappa = kappa_0;
     initial_lp(i) = lp;
     initial_dlp(i) = dlp;
@@ -166,7 +184,7 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
       if (a_propose > 10.0) {
 	kappa = (10.0 - a)/dlp;
       }
-      theta_kappa = -1.0 * log_posterior(a + kappa*dlp, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+      theta_kappa = -1.0 * log_posterior(a + kappa*dlp, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
       theta_hat_kappa = -1.0 * lp - kappa * epsilon * R_pow_di(dlp, 2);
       // if this inequality is true, we have satisfied the Armijo rule and 
       // accept the step size kappa, otherwise we halve kappa
@@ -174,7 +192,7 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
 	// iter_accept counts the number of accepted proposals;
 	iter_accept(i)++;
 	a = a + kappa * dlp;
-	lpnew = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+	lpnew = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
 	// look for change in log likelihood
 	change = lpnew - lp;
 	if (change < tol) {
@@ -187,7 +205,7 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
 	  break;
 	}
 	lp = lpnew;
-	dlp = dlog_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+	dlp = dlog_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
 	// instead of resetting kappa to kappa_0 
	// multiply kappa by 1.1
 	kappa = fmin(kappa * 1.1, kappa_0);
@@ -203,7 +221,7 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
     }
     last_lp(i) = lp;
     last_dlp(i) = dlp;
-    last_d2lp(i) = d2log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+    last_d2lp(i) = d2log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
     log_alpha(i) = a;
     // last change indicates the change for the final iteration
     last_change(i) = change;
@@ -224,7 +242,7 @@ Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP,
 // note: the betas are on the natural log scale
 //
 // [[Rcpp::export]]
-Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP contrastSEXP, SEXP beta_matSEXP, SEXP lambdaSEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP useQRSEXP) {
+Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP contrastSEXP, SEXP beta_matSEXP, SEXP lambdaSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP useQRSEXP) {
   arma::mat y = Rcpp::as<arma::mat>(ySEXP);
   arma::mat nf = Rcpp::as<arma::mat>(nfSEXP);
   arma::mat x = Rcpp::as<arma::mat>(xSEXP);
@@ -243,6 +261,9 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
   int maxit = Rcpp::as<int>(maxitSEXP);
   arma::colvec yrow, nfrow, beta_hat, mu_hat, z;
   arma::mat w, ridge, sigma;
+  // observation weights
+  arma::mat weights = Rcpp::as<arma::mat>(weightsSEXP);
+  bool useWeights = Rcpp::as<bool>(useWeightsSEXP);
   // vars for QR
   bool useQR = Rcpp::as<bool>(useQRSEXP);
   arma::colvec gamma_hat, big_z;
@@ -273,7 +294,11 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
       // the ridge penalty
       for (int t = 0; t < maxit; t++) {
 	iter(i)++;
-	w = diagmat(mu_hat/(1.0 + alpha_hat[i] * mu_hat));
+	if (useWeights) {
+	  w = diagmat(weights.row(i).t() % mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+	} else {
+	  w = diagmat(mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+	}
 	// prepare matrices
 	weighted_x_ridge = join_cols(sqrt(w) * x, sqrt(ridge));
 	qr(q, r, weighted_x_ridge);
@@ -297,7 +322,11 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
 	dev = 0.0;
 	for (int j = 0; j < y_m; j++) {
 	  // note the order for Rf_dnbinom_mu: x, sz, mu, lg
-	  dev = dev + -2.0 * Rf_dnbinom_mu(yrow[j], 1.0/alpha_hat[i], mu_hat[j], 1);
+	  if (useWeights) {
+	    dev = dev + -2.0 * weights(i,j) * Rf_dnbinom_mu(yrow(j), 1.0/alpha_hat(i), mu_hat(j), 1);
+	  } else {
+	    dev = dev + -2.0 * Rf_dnbinom_mu(yrow(j), 1.0/alpha_hat(i), mu_hat(j), 1);
+	  }
 	}
 	conv_test = fabs(dev - dev_old)/(fabs(dev) + 0.1);
 	if (std::isnan(conv_test)) {
@@ -314,7 +343,11 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
       // and matrix inversion
       for (int t = 0; t < maxit; t++) {
 	iter(i)++;
-	w = diagmat(mu_hat/(1.0 + alpha_hat[i] * mu_hat));
+	if (useWeights) {
+	  w = diagmat(weights.row(i).t() % mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+	} else {
+	  w = diagmat(mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+	}
 	z = arma::log(mu_hat / nfrow) + (yrow - mu_hat) / mu_hat;
 	solve(beta_hat, x.t() * w * x + ridge, x.t() * w * z);
 	if (sum(abs(beta_hat) > large) > 0) {
@@ -328,7 +361,11 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
 	dev = 0.0;
 	for (int j = 0; j < y_m; j++) {
 	  // note the order for Rf_dnbinom_mu: x, sz, mu, lg
-	  dev = dev + -2.0 * Rf_dnbinom_mu(yrow[j], 1.0/alpha_hat[i], mu_hat[j], 1);
+	  if (useWeights) {
+	    dev = dev + -2.0 * weights(i,j) * Rf_dnbinom_mu(yrow(j), 1.0/alpha_hat(i), mu_hat(j), 1);
+	  } else {
+	    dev = dev + -2.0 * Rf_dnbinom_mu(yrow(j), 1.0/alpha_hat(i), mu_hat(j), 1);
+	  }
 	}
 	conv_test = fabs(dev - dev_old)/(fabs(dev) + 0.1);
 	if (std::isnan(conv_test)) {
@@ -344,7 +381,11 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
     deviance(i) = dev;
     beta_mat.row(i) = beta_hat.t();
     // recalculate w so that this is identical if we start with beta_hat
-    w = diagmat(mu_hat/(1.0 + alpha_hat[i] * mu_hat));
+    if (useWeights) {
+      w = diagmat(weights.row(i).t() % mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+    } else {
+      w = diagmat(mu_hat/(1.0 + alpha_hat(i) * mu_hat));
+    }
     hat_matrix = sqrt(w) * x * (x.t() * w * x + ridge).i() * x.t() * sqrt(w);
     hat_diagonals.row(i) = diagvec(hat_matrix).t();
     // sigma is the covariance matrix for the betas
@@ -365,7 +406,7 @@ Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP
 
 
 // [[Rcpp::export]]
-Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP use_priorSEXP) {
+Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP usePriorSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP) {
   Rcpp::NumericMatrix y(ySEXP);
   arma::mat x = Rcpp::as<arma::mat>(xSEXP);
   int y_n = y.nrow();
@@ -374,7 +415,7 @@ Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSE
   int disp_grid_n = disp_grid.n_elem;
   Rcpp::NumericVector log_alpha_prior_mean(log_alpha_prior_meanSEXP);
   double log_alpha_prior_sigmasq = Rcpp::as<double>(log_alpha_prior_sigmasqSEXP);
-  bool use_prior = Rcpp::as<bool>(use_priorSEXP);
+  bool usePrior = Rcpp::as<bool>(usePriorSEXP);
   double a;
   double delta = disp_grid(1) - disp_grid(0);
   double a_hat;
@@ -382,6 +423,10 @@ Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSE
   arma::vec logpostvec = arma::zeros(disp_grid.n_elem);
   arma::vec log_alpha = arma::zeros(y_n);
   arma::uword idxmax;
+  // observation weights
+  Rcpp::NumericMatrix weights(weightsSEXP);
+  bool useWeights = Rcpp::as<bool>(useWeightsSEXP);
+
   for (int i = 0; i < y_n; i++) {
     Rcpp::checkUserInterrupt();
     Rcpp::NumericMatrix::Row yrow = y(i,_);
@@ -389,14 +434,14 @@ Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSE
     for (int t = 0; t < disp_grid_n; t++) {
       // maximize the log likelihood over the variable a, the log of alpha, the dispersion parameter
       a = disp_grid(t);
-      logpostvec(t) = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+      logpostvec(t) = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
     }
     logpostvec.max(idxmax);
     a_hat = disp_grid(idxmax);
     disp_grid_fine = arma::linspace<arma::vec>(a_hat - delta, a_hat + delta, disp_grid_n);
     for (int t = 0; t < disp_grid_n; t++) {
       a = disp_grid_fine(t);
-      logpostvec(t) = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, use_prior);
+      logpostvec(t) = log_posterior(a, yrow, mu_hat_row, x, log_alpha_prior_mean(i), log_alpha_prior_sigmasq, usePrior, weights.row(i), useWeights);
     }
     logpostvec.max(idxmax);
     log_alpha(i) = disp_grid_fine(idxmax);
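
The backtracking loop in \code{fitDisp} above implements the Armijo rule; the same acceptance test in a self-contained R sketch (the helper \code{armijo_step} is an assumption for exposition, not part of the patch):

# one ascent step on a 1-d objective f with gradient g, with Armijo backtracking
armijo_step <- function(f, g, a, kappa0=1, epsilon=1e-4, halvings=50) {
  d <- g(a)              # search direction: the gradient itself
  kappa <- kappa0
  for (h in seq_len(halvings)) {
    # accept kappa once the increase beats the linear bound kappa*epsilon*d^2
    if (f(a + kappa*d) >= f(a) + kappa*epsilon*d^2) return(a + kappa*d)
    kappa <- kappa/2     # otherwise halve the step size, as in the C++ above
  }
  a                      # fall back to the current point
}
armijo_step(function(x) -(x-2)^2, function(x) -2*(x-2), a=0)  # returns 2
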
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
index 8a484cf..90704b9 100644
--- a/src/RcppExports.cpp
+++ b/src/RcppExports.cpp
@@ -1,4 +1,4 @@
-// This file was generated by Rcpp::compileAttributes
+// Generated by using Rcpp::compileAttributes() -> do not edit by hand
 // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
 
 #include <RcppArmadillo.h>
@@ -7,11 +7,11 @@
 using namespace Rcpp;
 
 // fitDisp
-Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP min_log_alphaSEXP, SEXP kappa_0SEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP use_priorSEXP);
-RcppExport SEXP DESeq2_fitDisp(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP mu_hatSEXPSEXP, SEXP log_alphaSEXPSEXP, SEXP log_alpha_prior_meanSEXPSEXP, SEXP log_alpha_prior_sigmasqSEXPSEXP, SEXP min_log_alphaSEXPSEXP, SEXP kappa_0SEXPSEXP, SEXP tolSEXPSEXP, SEXP maxitSEXPSEXP, SEXP use_priorSEXPSEXP) {
+Rcpp::List fitDisp(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP log_alphaSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP min_log_alphaSEXP, SEXP kappa_0SEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP usePriorSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP);
+RcppExport SEXP DESeq2_fitDisp(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP mu_hatSEXPSEXP, SEXP log_alphaSEXPSEXP, SEXP log_alpha_prior_meanSEXPSEXP, SEXP log_alpha_prior_sigmasqSEXPSEXP, SEXP min_log_alphaSEXPSEXP, SEXP kappa_0SEXPSEXP, SEXP tolSEXPSEXP, SEXP maxitSEXPSEXP, SEXP usePriorSEXPSEXP, SEXP weightsSEXPSEXP, SEXP useWeightsSEXPSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject __result;
-    Rcpp::RNGScope __rngScope;
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
     Rcpp::traits::input_parameter< SEXP >::type ySEXP(ySEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type xSEXP(xSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type mu_hatSEXP(mu_hatSEXPSEXP);
@@ -22,17 +22,19 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< SEXP >::type kappa_0SEXP(kappa_0SEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type tolSEXP(tolSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type maxitSEXP(maxitSEXPSEXP);
-    Rcpp::traits::input_parameter< SEXP >::type use_priorSEXP(use_priorSEXPSEXP);
-    __result = Rcpp::wrap(fitDisp(ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, use_priorSEXP));
-    return __result;
+    Rcpp::traits::input_parameter< SEXP >::type usePriorSEXP(usePriorSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type weightsSEXP(weightsSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type useWeightsSEXP(useWeightsSEXPSEXP);
+    rcpp_result_gen = Rcpp::wrap(fitDisp(ySEXP, xSEXP, mu_hatSEXP, log_alphaSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, min_log_alphaSEXP, kappa_0SEXP, tolSEXP, maxitSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP));
+    return rcpp_result_gen;
 END_RCPP
 }
 // fitBeta
-Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP contrastSEXP, SEXP beta_matSEXP, SEXP lambdaSEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP useQRSEXP);
-RcppExport SEXP DESeq2_fitBeta(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP nfSEXPSEXP, SEXP alpha_hatSEXPSEXP, SEXP contrastSEXPSEXP, SEXP beta_matSEXPSEXP, SEXP lambdaSEXPSEXP, SEXP tolSEXPSEXP, SEXP maxitSEXPSEXP, SEXP useQRSEXPSEXP) {
+Rcpp::List fitBeta(SEXP ySEXP, SEXP xSEXP, SEXP nfSEXP, SEXP alpha_hatSEXP, SEXP contrastSEXP, SEXP beta_matSEXP, SEXP lambdaSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP, SEXP tolSEXP, SEXP maxitSEXP, SEXP useQRSEXP);
+RcppExport SEXP DESeq2_fitBeta(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP nfSEXPSEXP, SEXP alpha_hatSEXPSEXP, SEXP contrastSEXPSEXP, SEXP beta_matSEXPSEXP, SEXP lambdaSEXPSEXP, SEXP weightsSEXPSEXP, SEXP useWeightsSEXPSEXP, SEXP tolSEXPSEXP, SEXP maxitSEXPSEXP, SEXP useQRSEXPSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject __result;
-    Rcpp::RNGScope __rngScope;
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
     Rcpp::traits::input_parameter< SEXP >::type ySEXP(ySEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type xSEXP(xSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type nfSEXP(nfSEXPSEXP);
@@ -40,27 +42,31 @@ BEGIN_RCPP
     Rcpp::traits::input_parameter< SEXP >::type contrastSEXP(contrastSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type beta_matSEXP(beta_matSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type lambdaSEXP(lambdaSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type weightsSEXP(weightsSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type useWeightsSEXP(useWeightsSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type tolSEXP(tolSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type maxitSEXP(maxitSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type useQRSEXP(useQRSEXPSEXP);
-    __result = Rcpp::wrap(fitBeta(ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, tolSEXP, maxitSEXP, useQRSEXP));
-    return __result;
+    rcpp_result_gen = Rcpp::wrap(fitBeta(ySEXP, xSEXP, nfSEXP, alpha_hatSEXP, contrastSEXP, beta_matSEXP, lambdaSEXP, weightsSEXP, useWeightsSEXP, tolSEXP, maxitSEXP, useQRSEXP));
+    return rcpp_result_gen;
 END_RCPP
 }
 // fitDispGrid
-Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP use_priorSEXP);
-RcppExport SEXP DESeq2_fitDispGrid(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP mu_hatSEXPSEXP, SEXP disp_gridSEXPSEXP, SEXP log_alpha_prior_meanSEXPSEXP, SEXP log_alpha_prior_sigmasqSEXPSEXP, SEXP use_priorSEXPSEXP) {
+Rcpp::List fitDispGrid(SEXP ySEXP, SEXP xSEXP, SEXP mu_hatSEXP, SEXP disp_gridSEXP, SEXP log_alpha_prior_meanSEXP, SEXP log_alpha_prior_sigmasqSEXP, SEXP usePriorSEXP, SEXP weightsSEXP, SEXP useWeightsSEXP);
+RcppExport SEXP DESeq2_fitDispGrid(SEXP ySEXPSEXP, SEXP xSEXPSEXP, SEXP mu_hatSEXPSEXP, SEXP disp_gridSEXPSEXP, SEXP log_alpha_prior_meanSEXPSEXP, SEXP log_alpha_prior_sigmasqSEXPSEXP, SEXP usePriorSEXPSEXP, SEXP weightsSEXPSEXP, SEXP useWeightsSEXPSEXP) {
 BEGIN_RCPP
-    Rcpp::RObject __result;
-    Rcpp::RNGScope __rngScope;
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
     Rcpp::traits::input_parameter< SEXP >::type ySEXP(ySEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type xSEXP(xSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type mu_hatSEXP(mu_hatSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type disp_gridSEXP(disp_gridSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type log_alpha_prior_meanSEXP(log_alpha_prior_meanSEXPSEXP);
     Rcpp::traits::input_parameter< SEXP >::type log_alpha_prior_sigmasqSEXP(log_alpha_prior_sigmasqSEXPSEXP);
-    Rcpp::traits::input_parameter< SEXP >::type use_priorSEXP(use_priorSEXPSEXP);
-    __result = Rcpp::wrap(fitDispGrid(ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, use_priorSEXP));
-    return __result;
+    Rcpp::traits::input_parameter< SEXP >::type usePriorSEXP(usePriorSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type weightsSEXP(weightsSEXPSEXP);
+    Rcpp::traits::input_parameter< SEXP >::type useWeightsSEXP(useWeightsSEXPSEXP);
+    rcpp_result_gen = Rcpp::wrap(fitDispGrid(ySEXP, xSEXP, mu_hatSEXP, disp_gridSEXP, log_alpha_prior_meanSEXP, log_alpha_prior_sigmasqSEXP, usePriorSEXP, weightsSEXP, useWeightsSEXP));
+    return rcpp_result_gen;
 END_RCPP
 }
diff --git a/tests/testthat/test_1vs1.R b/tests/testthat/test_1vs1.R
index 15d8653..a3d64e8 100644
--- a/tests/testthat/test_1vs1.R
+++ b/tests/testthat/test_1vs1.R
@@ -1,3 +1,6 @@
-dds <- makeExampleDESeqDataSet(n=100, m=2)
-expect_warning({ dds <- DESeq(dds)})
-res <- results(dds)
+context("1vs1")
+test_that("1 vs 1 gets warning", {
+  dds <- makeExampleDESeqDataSet(n=100, m=2)
+  expect_warning({ dds <- DESeq(dds)})
+  res <- results(dds)
+})
diff --git a/tests/testthat/test_DESeq.R b/tests/testthat/test_DESeq.R
index 36bb83f..ffd5a47 100644
--- a/tests/testthat/test_DESeq.R
+++ b/tests/testthat/test_DESeq.R
@@ -1,17 +1,20 @@
-dds <- makeExampleDESeqDataSet(n=100, m=8)
-expect_error(DESeq(dds, test="LRT"))
-expect_error(DESeq(dds, test="Wald", full=~condition, reduced=~1))
-expect_error(DESeq(dds, full=~1))
-
-m <- model.matrix(~ condition, colData(dds))
-expect_error(DESeq(dds, test="LRT", full=m, reduced=~1))
-expect_error(DESeq(dds, test="LRT", full=m, reduced=m))
-expect_error(DESeq(dds, full=m, betaPrior=TRUE))
-
-design(dds) <- ~ 0 + condition
-expect_error(DESeq(dds, betaPrior=TRUE))
-
-dds <- makeExampleDESeqDataSet(n=100)
-dds$condition <- factor(rep(c("A","B","C"),each=4))
-dds <- dds[,1:8]
-expect_error(DESeq(dds))
+context("DESeq")
+test_that("DESeq() gives correct errors", {
+  dds <- makeExampleDESeqDataSet(n=100, m=8)
+  expect_error(DESeq(dds, test="LRT"))
+  expect_error(DESeq(dds, test="Wald", full=~condition, reduced=~1))
+  expect_error(DESeq(dds, full=~1))
+  
+  m <- model.matrix(~ condition, colData(dds))
+  expect_error(DESeq(dds, test="LRT", full=m, reduced=~1))
+  expect_error(DESeq(dds, test="LRT", full=m, reduced=m))
+  expect_error(DESeq(dds, full=m, betaPrior=TRUE))
+  
+  design(dds) <- ~ 0 + condition
+  expect_error(DESeq(dds, betaPrior=TRUE))
+  
+  dds <- makeExampleDESeqDataSet(n=100)
+  dds$condition <- factor(rep(c("A","B","C"),each=4))
+  dds <- dds[,1:8]
+  expect_error(DESeq(dds))
+})
diff --git a/tests/testthat/test_LRT.R b/tests/testthat/test_LRT.R
index a10e8fe..0d576de 100644
--- a/tests/testthat/test_LRT.R
+++ b/tests/testthat/test_LRT.R
@@ -1,8 +1,12 @@
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-dds$group <- factor(c(1,2,1,2))
-design(dds) <- ~ condition
-expect_error(DESeq(dds, test="LRT", reduced=~group))
-expect_error(DESeq(dds, test="LRT", reduced=~1, modelMatrixType="expanded"))
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
-expect_error(nbinomLRT(dds))
+context("LRT")
+test_that("test='LRT' gives correct errors", {
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  dds$group <- factor(c(1,2,1,2))
+  design(dds) <- ~ condition
+  expect_error(DESeq(dds, test="LRT", reduced=~group))
+  expect_error(DESeq(dds, test="LRT", reduced=~1, modelMatrixType="expanded"))
+  expect_error(DESeq(dds, test="LRT", reduced=~group, betaPrior=TRUE))
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersions(dds)
+  expect_error(nbinomLRT(dds))
+})
diff --git a/tests/testthat/test_LRT_prior.R b/tests/testthat/test_LRT_prior.R
deleted file mode 100644
index 56c623c..0000000
--- a/tests/testthat/test_LRT_prior.R
+++ /dev/null
@@ -1,13 +0,0 @@
-dds <- makeExampleDESeqDataSet(n=100)
-colData(dds)$condition <- factor(rep(1:3,each=4))
-colData(dds)$group <- factor(rep(1:2,6))
-design(dds) <- ~ group + condition
-dds <- DESeq(dds,test="LRT",reduced=~ group,betaPrior=TRUE)
-expect_true(any(attr(dds,"betaPriorVar") < 1e6))
-res <- results(dds)
-expect_true(grepl("LRT",mcols(res)$description[colnames(res) == "stat"]))
-
-design(dds) <- ~ group * condition
-expect_error(dds <- DESeq(dds,test="LRT",reduced=~ group,betaPrior=TRUE))
-
-
diff --git a/tests/testthat/test_QR.R b/tests/testthat/test_QR.R
index 6255184..789b6b2 100644
--- a/tests/testthat/test_QR.R
+++ b/tests/testthat/test_QR.R
@@ -1,8 +1,10 @@
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100,betaSD=1)
-dds <- DESeq(dds)
-ddsNoQR <- nbinomWaldTest(dds,useQR=FALSE)
-res <- results(dds)
-resNoQR <- results(ddsNoQR)
-expect_equal(res$log2FoldChange, resNoQR$log2FoldChange, tolerance=1e-6)
-
+context("QR")
+test_that("not using QR works as expected", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100,betaSD=1)
+  dds <- DESeq(dds, quiet=TRUE)
+  ddsNoQR <- nbinomWaldTest(dds, useQR=FALSE)
+  res <- results(dds)
+  resNoQR <- results(ddsNoQR)
+  expect_equal(res$log2FoldChange, resNoQR$log2FoldChange, tolerance=1e-6)
+})
diff --git a/tests/testthat/test_addMLE.R b/tests/testthat/test_addMLE.R
index c09ee64..e998f3c 100644
--- a/tests/testthat/test_addMLE.R
+++ b/tests/testthat/test_addMLE.R
@@ -1,18 +1,20 @@
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=200,m=12,betaSD=1)
-dds$condition <- factor(rep(letters[1:3],each=4))
-dds <- DESeq(dds)
-ddsNP <- nbinomWaldTest(dds, betaPrior=FALSE)
+context("addMLE")
+test_that("adding MLE works as expected", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=200,m=12,betaSD=1)
+  dds$condition <- factor(rep(letters[1:3],each=4))
+  dds <- DESeq(dds, betaPrior=TRUE)
+  ddsNP <- nbinomWaldTest(dds, betaPrior=FALSE)
 
-res1 <- results(dds, contrast=c("condition","c","a"), addMLE=TRUE)
-res2 <- results(ddsNP, contrast=c("condition","c","a"))
-expect_equal(res1$lfcMLE, res2$log2FoldChange)
+  res1 <- results(dds, contrast=c("condition","c","a"), addMLE=TRUE)
+  res2 <- results(ddsNP, contrast=c("condition","c","a"))
+  expect_equal(res1$lfcMLE, res2$log2FoldChange)
 
-res1 <- results(dds, contrast=c("condition","a","b"), addMLE=TRUE)
-res2 <- results(ddsNP, contrast=c("condition","a","b"))
-expect_equal(res1$lfcMLE, res2$log2FoldChange)
-
-res1 <- results(dds, contrast=c("condition","c","b"), addMLE=TRUE)
-res2 <- results(ddsNP, contrast=c("condition","c","b"))
-expect_equal(res1$lfcMLE, res2$log2FoldChange)
+  res1 <- results(dds, contrast=c("condition","a","b"), addMLE=TRUE)
+  res2 <- results(ddsNP, contrast=c("condition","a","b"))
+  expect_equal(res1$lfcMLE, res2$log2FoldChange)
 
+  res1 <- results(dds, contrast=c("condition","c","b"), addMLE=TRUE)
+  res2 <- results(ddsNP, contrast=c("condition","c","b"))
+  expect_equal(res1$lfcMLE, res2$log2FoldChange)
+})
diff --git a/tests/testthat/test_betaFitting.R b/tests/testthat/test_betaFitting.R
index 1575cbe..494da1f 100644
--- a/tests/testthat/test_betaFitting.R
+++ b/tests/testthat/test_betaFitting.R
@@ -1,44 +1,47 @@
-# test for equivalence of DESeq2 estimates with those
-# found using IRLS code and using optim
-m <- 10
-set.seed(1)
-y <- rpois(m,20)
-sf <- rep(1,m)
-condition <- factor(rep(0:1,each=m/2))
-x <- cbind(rep(1,m),rep(0:1,each=m/2))
-lambda <- 2
-alpha <- .5
+context("betaFitting")
+test_that("estimates of beta fit from various methods are equal", {
+  # test for equivalence of DESeq2 estimates with those
+  # found using IRLS code and using optim
+  m <- 10
+  set.seed(1)
+  y <- rpois(m,20)
+  sf <- rep(1,m)
+  condition <- factor(rep(0:1,each=m/2))
+  x <- cbind(rep(1,m),rep(0:1,each=m/2))
+  lambda <- 2
+  alpha <- .5
 
-dds <- DESeqDataSetFromMatrix(matrix(y,nrow=1),
-                              colData=DataFrame(condition),
-                              design= ~ condition)
-sizeFactors(dds) <- sf
-dispersions(dds) <- alpha
-mcols(dds)$baseMean <- mean(y)
+  dds <- DESeqDataSetFromMatrix(matrix(y,nrow=1),
+                                colData=DataFrame(condition),
+                                design= ~ condition)
+  sizeFactors(dds) <- sf
+  dispersions(dds) <- alpha
+  mcols(dds)$baseMean <- mean(y)
 
-# for testing we convert beta to the naturual log scale:
-# convert lambda from log to log2 scale by multiplying by log(2)^2
-# then convert beta back from log2 to log scale by multiplying by log(2)
-betaDESeq <- log(2)*DESeq2:::fitNbinomGLMs(dds, lambda=c(0,lambda*log(2)^2))$betaMatrix
+  # for testing we convert beta to the natural log scale:
+  # convert lambda from log to log2 scale by multiplying by log(2)^2
+  # then convert beta back from log2 to log scale by multiplying by log(2)
+  betaDESeq <- log(2)*DESeq2:::fitNbinomGLMs(dds, lambda=c(0,lambda*log(2)^2))$betaMatrix
 
-# the IRLS algorithm
-betaIRLS <- c(1,1)
-for (t in 1:100) {
-  mu.hat <- as.vector(sf * exp(x %*% betaIRLS))
-  w <- diag(1/(1/mu.hat^2 * ( mu.hat + alpha * mu.hat^2 )))
-  z <- log(mu.hat/sf) + (y - mu.hat)/mu.hat
-  ridge <- diag(c(0,lambda))
-  betaIRLS <- as.vector(solve(t(x) %*% w %*% x + ridge) %*% t(x) %*% w %*% z)
-}
+  # the IRLS algorithm
+  betaIRLS <- c(1,1)
+  for (t in 1:100) {
+    mu.hat <- as.vector(sf * exp(x %*% betaIRLS))
+    w <- diag(1/(1/mu.hat^2 * ( mu.hat + alpha * mu.hat^2 )))
+    z <- log(mu.hat/sf) + (y - mu.hat)/mu.hat
+    ridge <- diag(c(0,lambda))
+    betaIRLS <- as.vector(solve(t(x) %*% w %*% x + ridge) %*% t(x) %*% w %*% z)
+  }
 
-# using optim
-objectiveFn <- function(p) {
-  mu <- exp(x %*% p)
-  logLike <- sum(dnbinom(y, mu=mu, size=1/alpha, log=TRUE))
-  prior <- dnorm(p[2], 0, sqrt(1/lambda),log=TRUE)
-  -1 * (logLike + prior)
-}
-betaOptim <- optim(c(.1,.1), objectiveFn, control=list(reltol=1e-16))$par
+  # using optim
+  objectiveFn <- function(p) {
+    mu <- exp(x %*% p)
+    logLike <- sum(dnbinom(y, mu=mu, size=1/alpha, log=TRUE))
+    prior <- dnorm(p[2], 0, sqrt(1/lambda),log=TRUE)
+    -1 * (logLike + prior)
+  }
+  betaOptim <- optim(c(.1,.1), objectiveFn, control=list(reltol=1e-16))$par
 
-expect_equal(as.numeric(betaDESeq), betaIRLS, tolerance=1e-6)
-expect_equal(as.numeric(betaDESeq), betaOptim, tolerance=1e-6)
+  expect_equal(as.numeric(betaDESeq), betaIRLS, tolerance=1e-6)
+  expect_equal(as.numeric(betaDESeq), betaOptim, tolerance=1e-6)
+})
diff --git a/tests/testthat/test_collapse.R b/tests/testthat/test_collapse.R
index d6c0194..ab745fd 100644
--- a/tests/testthat/test_collapse.R
+++ b/tests/testthat/test_collapse.R
@@ -1,6 +1,9 @@
-dds <- makeExampleDESeqDataSet(n=10, m=8)
-dds$sample <- rep(1:4, each=2)
-dds$run <- 1:8
-dds2 <- collapseReplicates(dds, groupby=dds$sample, run=dds$run)
-expect_true(all(counts(dds2)[,1] == rowSums(counts(dds)[,1:2])))
-expect_true(dds2$runsCollapsed[1] == "1,2")
+context("collapse")
+test_that("collapse replicates works", {
+  dds <- makeExampleDESeqDataSet(n=10, m=8)
+  dds$sample <- rep(1:4, each=2)
+  dds$run <- 1:8
+  dds2 <- collapseReplicates(dds, groupby=dds$sample, run=dds$run)
+  expect_true(all(counts(dds2)[,1] == rowSums(counts(dds)[,1:2])))
+  expect_true(dds2$runsCollapsed[1] == "1,2")
+})
diff --git a/tests/testthat/test_construction_errors.R b/tests/testthat/test_construction_errors.R
index 3434869..1c4347c 100644
--- a/tests/testthat/test_construction_errors.R
+++ b/tests/testthat/test_construction_errors.R
@@ -1,37 +1,40 @@
-coldata <- DataFrame(x=factor(c("A","A","B","B")),
-                     name=letters[1:4],
-                     ident=factor(rep("A",4)),
-                     num=1:4,
-                     missinglevels=factor(c("A","A","B","B"), levels=c("A","B","C")),
-                     notref=factor(c("control","control","abc","abc")),
-                     row.names=1:4)
-counts <- matrix(1:16, ncol=4)
+context("construction_errors")
+test_that("proper errors thrown in object construction", {
+  coldata <- DataFrame(x=factor(c("A","A","B","B")),
+                       name=letters[1:4],
+                       ident=factor(rep("A",4)),
+                       num=1:4,
+                       missinglevels=factor(c("A","A","B","B"), levels=c("A","B","C")),
+                       notref=factor(c("control","control","abc","abc")),
+                       row.names=1:4)
+  counts <- matrix(1:16, ncol=4)
 
-expect_message(DESeqDataSet(SummarizedExperiment(list(foo=counts), colData=coldata), ~ x))
-expect_error(DESeqDataSetFromMatrix(matrix(c(1:11,-1),ncol=4), coldata, ~ x))
-expect_error(DESeqDataSetFromMatrix(matrix(c(1:11,0.5),ncol=4), coldata, ~ x))
-expect_error(DESeqDataSetFromMatrix(matrix(rep(0,16),ncol=4), coldata, ~ x))
-expect_warning(DESeqDataSetFromMatrix(matrix(rep(1:4,4),ncol=4), coldata, ~ x))
-expect_warning(DESeqDataSetFromMatrix(matrix(1:16, ncol=4, dimnames=list(c(1,2,3,3),1:4)), coldata, ~ x))
-expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ y))
-expect_warning(DESeqDataSetFromMatrix(counts, coldata, ~ name))
-expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ ident))
-expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ num))
-expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ missinglevels))
-expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ notref))
-expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ident + x), "design contains")
+  expect_message(DESeqDataSet(SummarizedExperiment(list(foo=counts), colData=coldata), ~ x))
+  expect_error(DESeqDataSetFromMatrix(matrix(c(1:11,-1),ncol=4), coldata, ~ x))
+  expect_error(DESeqDataSetFromMatrix(matrix(c(1:11,0.5),ncol=4), coldata, ~ x))
+  expect_error(DESeqDataSetFromMatrix(matrix(rep(0,16),ncol=4), coldata, ~ x))
+  expect_warning(DESeqDataSetFromMatrix(matrix(rep(1:4,4),ncol=4), coldata, ~ x))
+  expect_warning(DESeqDataSetFromMatrix(matrix(1:16, ncol=4, dimnames=list(c(1,2,3,3),1:4)), coldata, ~ x))
+  expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ y))
+  expect_warning(DESeqDataSetFromMatrix(counts, coldata, ~ name))
+  expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ ident))
+  expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ num))
+  expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ missinglevels))
+  expect_message(DESeqDataSetFromMatrix(counts, coldata, ~ notref))
+  expect_error(DESeqDataSetFromMatrix(counts, coldata, ~ident + x), "design contains")
 
-# same colnames but in different order:
-expect_error(DESeqDataSetFromMatrix(matrix(1:16, ncol=4, dimnames=list(1:4, 4:1)), coldata, ~ x))
+  # same colnames but in different order:
+  expect_error(DESeqDataSetFromMatrix(matrix(1:16, ncol=4, dimnames=list(1:4, 4:1)), coldata, ~ x))
 
-# testing incoming metadata columns
-coldata <- DataFrame(x=factor(c("A","A","B","B")))
-rowranges <- GRanges("1", IRanges(1 + 0:3 * 10, width=10))
-se <- SummarizedExperiment(list(counts=counts), colData=coldata, rowRanges=rowranges)
-mcols(colData(se)) <- DataFrame(info="x is a factor")
-mcols(se)$id <- 1:4
-mcols(mcols(se)) <- DataFrame(info="the gene id")
-dds <- DESeqDataSet(se, ~ x)
-mcols(colData(dds))
-mcols(mcols(dds))
+  # testing incoming metadata columns
+  coldata <- DataFrame(x=factor(c("A","A","B","B")))
+  rowranges <- GRanges("1", IRanges(1 + 0:3 * 10, width=10))
+  se <- SummarizedExperiment(list(counts=counts), colData=coldata, rowRanges=rowranges)
+  mcols(colData(se)) <- DataFrame(info="x is a factor")
+  mcols(se)$id <- 1:4
+  mcols(mcols(se)) <- DataFrame(info="the gene id")
+  dds <- DESeqDataSet(se, ~ x)
+  mcols(colData(dds))
+  mcols(mcols(dds))
 
+})
diff --git a/tests/testthat/test_counts_input.R b/tests/testthat/test_counts_input.R
index dbb0a90..5fa1e72 100644
--- a/tests/testthat/test_counts_input.R
+++ b/tests/testthat/test_counts_input.R
@@ -1,13 +1,16 @@
-# count matrix input
-cnts <- matrix(rnbinom(40,mu=100,size=2),ncol=4)
-mode(cnts) <- "integer"
-coldata <- data.frame(cond=factor(c("A","A","B","B")))
-dds <- DESeqDataSetFromMatrix(cnts, coldata, ~cond)
+context("counts_input")
+test_that("counts can be supplied as input (tidy or not)", {
+  # count matrix input
+  cnts <- matrix(rnbinom(40,mu=100,size=2),ncol=4)
+  mode(cnts) <- "integer"
+  coldata <- data.frame(cond=factor(c("A","A","B","B")))
+  dds <- DESeqDataSetFromMatrix(cnts, coldata, ~cond)
 
-# tidy data frame input
-gene.names <- paste0("gene",1:10)
-rownames(coldata) <- colnames(cnts) <- letters[1:4]
-tidy.counts <- cbind(gene.names, as.data.frame(cnts))
-dds <- DESeqDataSetFromMatrix(tidy.counts, coldata, ~cond, tidy=TRUE)
-expect_true(all(rownames(dds) == gene.names))
-expect_true(all(counts(dds) == cnts))
+  # tidy data frame input
+  gene.names <- paste0("gene",1:10)
+  rownames(coldata) <- colnames(cnts) <- letters[1:4]
+  tidy.counts <- cbind(gene.names, as.data.frame(cnts))
+  dds <- DESeqDataSetFromMatrix(tidy.counts, coldata, ~cond, tidy=TRUE)
+  expect_true(all(rownames(dds) == gene.names))
+  expect_true(all(counts(dds) == cnts))
+})
diff --git a/tests/testthat/test_custom_filt.R b/tests/testthat/test_custom_filt.R
index cab2026..0bfa3d8 100644
--- a/tests/testthat/test_custom_filt.R
+++ b/tests/testthat/test_custom_filt.R
@@ -1,25 +1,28 @@
-# try a custom filter function
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=200, m=4, betaSD=rep(c(0,2),c(150,50)))
-dds <- DESeq(dds)
-res <- results(dds)
-method <- "BH"
-alpha <- 0.1
+context("custom_filt")
+test_that("custom filters can be provided to results()", {
+  # try a custom filter function
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=200, m=4, betaSD=rep(c(0,2),c(150,50)))
+  dds <- DESeq(dds)
+  res <- results(dds)
+  method <- "BH"
+  alpha <- 0.1
 
-customFilt <- function(res, filter, alpha, method) {
-  if (missing(filter)) {
-    filter <- res$baseMean
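+  # a sketch re-implementing independent filtering: scan quantile thresholds
+  # of the filter statistic, keep the one maximizing the number of adjusted
+  # p-values below alpha, and set padj to NA for genes below that threshold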
+  customFilt <- function(res, filter, alpha, method) {
+    if (missing(filter)) {
+      filter <- res$baseMean
+    }
+    theta <- 0:10/10
+    cutoff <- quantile(filter, theta)
+    numRej <- sapply(cutoff, function(x) sum(p.adjust(res$pvalue[filter > x]) < alpha, na.rm=TRUE))
+    threshold <- theta[which(numRej == max(numRej))[1]]
+    res$padj <- numeric(nrow(res))
+    idx <- filter > quantile(filter, threshold)
+    res$padj[!idx] <- NA
+    res$padj[idx] <- p.adjust(res$pvalue[idx], method=method)
+    res
   }
-  theta <- 0:10/10
-  cutoff <- quantile(filter, theta)
-  numRej <- sapply(cutoff, function(x) sum(p.adjust(res$pvalue[filter > x]) < alpha, na.rm=TRUE))
-  threshold <- theta[which(numRej == max(numRej))[1]]
-  res$padj <- numeric(nrow(res))
-  idx <- filter > quantile(filter, threshold)
-  res$padj[!idx] <- NA
-  res$padj[idx] <- p.adjust(res$pvalue[idx], method=method)
-  res
-}
 
-resCustom <- results(dds, filterFun=customFilt)
-plot(res$padj, resCustom$padj);abline(0,1)
+  resCustom <- results(dds, filterFun=customFilt)
+  plot(res$padj, resCustom$padj);abline(0,1)
+})
diff --git a/tests/testthat/test_disp_fit.R b/tests/testthat/test_disp_fit.R
index 58340b9..97103e2 100644
--- a/tests/testthat/test_disp_fit.R
+++ b/tests/testthat/test_disp_fit.R
@@ -1,113 +1,98 @@
-# test the optimization of the logarithm of dispersion (alpha)
-# parameter with Cox-Reid adjustment and prior distribution.
-# also test the derivatives of the log posterior w.r.t. log alpha
-m <- 10
-set.seed(1)
-y <- rpois(m,20)
-sf <- rep(1,m)
-condition <- factor(rep(0:1,each=m/2))
-x <- cbind(rep(1,m),rep(0:1,each=m/2))
-colnames(x) <- c("Intercept","condition")
-
-lambda <- 2
-alpha <- .5
-
-# make a DESeqDataSet but don't use the design formula
-# instead we supply a model matrix below
-dds <- DESeqDataSetFromMatrix(matrix(y,nrow=1),
-                              colData=DataFrame(condition),
-                              design= ~ condition)
-sizeFactors(dds) <- sf
-dispersions(dds) <- alpha
-mcols(dds)$baseMean <- mean(y)
-
-# for testing we convert beta to the naturual log scale:
-# convert lambda from log to log2 scale by multiplying by log(2)^2
-# then convert beta back from log2 to log scale by multiplying by log(2)
-betaDESeq <- log(2)*DESeq2:::fitNbinomGLMs(dds, lambda=c(0,lambda*log(2)^2),
-                                           modelMatrix=x)$betaMatrix
-
-log_alpha_prior_mean <- .5
-log_alpha_prior_sigmasq <- 1
-mu.hat <- as.numeric(exp(x %*% t(betaDESeq)))
-
-dispRes <- DESeq2:::fitDisp(ySEXP = matrix(y,nrow=1), xSEXP = x,
-                            mu_hatSEXP = matrix(mu.hat,nrow=1), log_alphaSEXP = 0,
-                            log_alpha_prior_meanSEXP = log_alpha_prior_mean,
-                            log_alpha_prior_sigmasqSEXP = log_alpha_prior_sigmasq,
-                            min_log_alphaSEXP = log(1e-8), kappa_0SEXP = 1,
-                            tolSEXP = 1e-16, maxitSEXP = 100, use_priorSEXP = TRUE)
-
-# maximum a posteriori (MAP) estimate from DESeq
-dispDESeq <- dispRes$log_alpha
-
-# MAP estimate using optim
-logPost <- function(log.alpha) {
-  alpha <- exp(log.alpha)
-  w <- diag(1/(1/mu.hat^2 * ( mu.hat + alpha * mu.hat^2 )))
-  logLike <- sum(dnbinom(y, mu=mu.hat, size=1/alpha, log=TRUE))
-  coxReid <- -.5*(log(det(t(x) %*% w %*% x)))
-  logPrior <- dnorm(log.alpha, log_alpha_prior_mean, sqrt(log_alpha_prior_sigmasq), log=TRUE)
-  (logLike + coxReid + logPrior)
-}
-
-dispOptim <- optim(0, function(p) -1*logPost(p), control=list(reltol=1e-16),
-                   method="Brent", lower=-10, upper=10)$par
-
-expect_equal(dispDESeq, dispOptim, tolerance=1e-6)
-
-# check derivatives:
-
-# from Ted Harding https://stat.ethz.ch/pipermail/r-help/2007-September/140013.html
-num.deriv <- function(f,x,h=0.001) (f(x + h/2) - f(x-h/2))/h
-num.2nd.deriv <- function(f,x,h=0.001) (f(x + h) - 2*f(x) + f(x - h))/h^2
-
-# first derivative of log posterior w.r.t log alpha at start
-dispDerivDESeq <- dispRes$initial_dlp
-dispDerivNum <- num.deriv(logPost,0)
-
-expect_equal(dispDerivDESeq, dispDerivNum, tolerance=1e-6)
-
-# second derivative at finish
-dispD2DESeq <- dispRes$last_d2lp
-dispD2Num <- num.2nd.deriv(logPost, dispRes$log_alpha)
-
-expect_equal(dispD2DESeq, dispD2Num, tolerance=1e-6)
-
-
-# test fit alternative
-dds <- makeExampleDESeqDataSet()
-dds <- estimateSizeFactors(dds)
-ddsLocal <- estimateDispersions(dds, fitType="local")
-ddsMean <- estimateDispersions(dds, fitType="mean")
-ddsMed <- estimateDispersionsGeneEst(dds)
-useForMedian <- mcols(ddsMed)$dispGeneEst > 1e-7
-medianDisp <- median(mcols(ddsMed)$dispGeneEst[useForMedian],na.rm=TRUE)
-dispersionFunction(ddsMed) <- function(mu) medianDisp
-ddsMed <- estimateDispersionsMAP(ddsMed)  
-
-
-# test iterative
-set.seed(1)
-dds <- makeExampleDESeqDataSet(m=50,n=100,betaSD=1,interceptMean=8)
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersionsGeneEst(dds, niter=5)
-with(mcols(dds)[!mcols(dds)$allZero,],
-     expect_equal(log(trueDisp), log(dispGeneEst),tol=0.2))
-
-
-# test dispersion fitting in R
-set.seed(1)
-trueDisp <- c(.005,.01,.05,.1,.2,.5)
-trueMu <- 1000
-m <- 200
-x <- cbind(rep(1,m),rep(0:1,each=m/2))
-y <- matrix(rnbinom(length(trueDisp)*m, mu=trueMu, size=1/rep(trueDisp,m)),ncol=m)
-mu <- matrix(rep(rowMeans(y),m),ncol=m)
-disp <- DESeq2:::fitDispInR(y = y, x = x, mu = mu,
-                            logAlphaPriorMean = NA,
-                            logAlphaPriorSigmaSq = NA,
-                            usePrior=FALSE)
-# plot(log(trueDisp), log(disp));abline(0,1)
-expect_equal(log(trueDisp), log(disp), tol=.5)
-
+context("disp_fit")
+test_that("the fitting of dispersion gives expected values using various methods", {
+  # test the optimization of the logarithm of dispersion (alpha)
+  # parameter with Cox-Reid adjustment and prior distribution.
+  # also test the derivatives of the log posterior w.r.t. log alpha
+  m <- 10
+  set.seed(1)
+  y <- rpois(m,20)
+  sf <- rep(1,m)
+  condition <- factor(rep(0:1,each=m/2))
+  x <- cbind(rep(1,m),rep(0:1,each=m/2))
+  colnames(x) <- c("Intercept","condition")
+
+  lambda <- 2
+  alpha <- .5
+
+  # make a DESeqDataSet but don't use the design formula
+  # instead we supply a model matrix below
+  dds <- DESeqDataSetFromMatrix(matrix(y,nrow=1),
+                                colData=DataFrame(condition),
+                                design= ~ condition)
+  sizeFactors(dds) <- sf
+  dispersions(dds) <- alpha
+  mcols(dds)$baseMean <- mean(y)
+
+  # for testing we convert beta to the natural log scale:
+  # convert lambda from log to log2 scale by multiplying by log(2)^2
+  # then convert beta back from log2 to log scale by multiplying by log(2)
+  betaDESeq <- log(2)*DESeq2:::fitNbinomGLMs(dds, lambda=c(0,lambda*log(2)^2),modelMatrix=x)$betaMatrix
+  log_alpha_prior_mean <- .5
+  log_alpha_prior_sigmasq <- 1
+  mu.hat <- as.numeric(exp(x %*% t(betaDESeq)))
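+  # fitDisp is the C++ routine that maximizes the Cox-Reid adjusted log
+  # posterior of log(alpha); unit weights are passed below but disabled
+  # via useWeightsSEXP=FALSE, so they do not affect the fit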
+  
+  dispRes <- DESeq2:::fitDisp(ySEXP = matrix(y,nrow=1), xSEXP = x,
+                              mu_hatSEXP = matrix(mu.hat,nrow=1), log_alphaSEXP = 0,
+                              log_alpha_prior_meanSEXP = log_alpha_prior_mean,
+                              log_alpha_prior_sigmasqSEXP = log_alpha_prior_sigmasq,
+                              min_log_alphaSEXP = log(1e-8), kappa_0SEXP = 1,
+                              tolSEXP = 1e-16, maxitSEXP = 100, usePriorSEXP = TRUE,
+                              weightsSEXP=matrix(1,nrow=1,ncol=length(y)), useWeightsSEXP=FALSE)
+  
+  # maximum a posteriori (MAP) estimate from DESeq
+  dispDESeq <- dispRes$log_alpha
+  
+  # MAP estimate using optim
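+  # the log posterior assembled here mirrors the C++ objective: the NB log
+  # likelihood, the Cox-Reid adjustment term -0.5*log(det(X'WX)), and a
+  # normal prior on log(alpha)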
+  logPost <- function(log.alpha) {
+    alpha <- exp(log.alpha)
+    w <- diag(1/(1/mu.hat^2 * ( mu.hat + alpha * mu.hat^2 )))
+    logLike <- sum(dnbinom(y, mu=mu.hat, size=1/alpha, log=TRUE))
+    coxReid <- -.5*(log(det(t(x) %*% w %*% x)))
+    logPrior <- dnorm(log.alpha, log_alpha_prior_mean, sqrt(log_alpha_prior_sigmasq), log=TRUE)
+    (logLike + coxReid + logPrior)
+  }
+  
+  dispOptim <- optim(0, function(p) -1*logPost(p), control=list(reltol=1e-16), method="Brent", lower=-10, upper=10)$par
+                     
+  expect_equal(dispDESeq, dispOptim, tolerance=1e-6)
+  
+  # check derivatives:
+  
+  # from Ted Harding https://stat.ethz.ch/pipermail/r-help/2007-September/140013.html
+  num.deriv <- function(f,x,h=0.001) (f(x + h/2) - f(x-h/2))/h
+  num.2nd.deriv <- function(f,x,h=0.001) (f(x + h) - 2*f(x) + f(x - h))/h^2
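+  # (central differences: f'(x) ~ (f(x+h/2)-f(x-h/2))/h and
+  #  f''(x) ~ (f(x+h)-2f(x)+f(x-h))/h^2)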
+
+  # first derivative of log posterior w.r.t log alpha at start
+  dispDerivDESeq <- dispRes$initial_dlp
+  dispDerivNum <- num.deriv(logPost,0)
+
+  expect_equal(dispDerivDESeq, dispDerivNum, tolerance=1e-6)
+
+  # second derivative at finish
+  dispD2DESeq <- dispRes$last_d2lp
+  dispD2Num <- num.2nd.deriv(logPost, dispRes$log_alpha)
+
+  expect_equal(dispD2DESeq, dispD2Num, tolerance=1e-6)
+
+
+  # test fit alternative
+  dds <- makeExampleDESeqDataSet()
+  dds <- estimateSizeFactors(dds)
+  ddsLocal <- estimateDispersions(dds, fitType="local")
+  ddsMean <- estimateDispersions(dds, fitType="mean")
+  ddsMed <- estimateDispersionsGeneEst(dds)
+  useForMedian <- mcols(ddsMed)$dispGeneEst > 1e-7
+  medianDisp <- median(mcols(ddsMed)$dispGeneEst[useForMedian],na.rm=TRUE)
+  dispersionFunction(ddsMed) <- function(mu) medianDisp
+  ddsMed <- estimateDispersionsMAP(ddsMed)  
+
+
+  # test iterative
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(m=50,n=100,betaSD=1,interceptMean=8)
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersionsGeneEst(dds, niter=5)
+  with(mcols(dds)[!mcols(dds)$allZero,],
+       expect_equal(log(trueDisp), log(dispGeneEst),tol=0.2))
+
+})
diff --git a/tests/testthat/test_dispersions.R b/tests/testthat/test_dispersions.R
index 435d057..8e6ff0d 100644
--- a/tests/testthat/test_dispersions.R
+++ b/tests/testthat/test_dispersions.R
@@ -1,28 +1,29 @@
-# test disperion errors
+context("dispersions")
+test_that("expected errors thrown during dispersion estimation", {
+  dds <- makeExampleDESeqDataSet(n=100, m=2)
+  dds <- estimateSizeFactors(dds)
+  expect_error(estimateDispersionsGeneEst(dds))
 
-dds <- makeExampleDESeqDataSet(n=100, m=2)
-dds <- estimateSizeFactors(dds)
-expect_error(estimateDispersionsGeneEst(dds))
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100, m=4, dispMeanRel=function(x) 0.001 + x/1e3, interceptMean=8, interceptSD=2)
+  dds <- estimateSizeFactors(dds)
+  mcols(dds)$dispGeneEst <- rep(1e-7, 100)
+  expect_error(estimateDispersionsFit(dds))
+  dds <- estimateDispersionsGeneEst(dds)
+  expect_message(estimateDispersionsFit(dds))
 
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100, m=4, dispMeanRel=function(x) 0.001 + x/1e3, interceptMean=8, interceptSD=2)
-dds <- estimateSizeFactors(dds)
-mcols(dds)$dispGeneEst <- rep(1e-7, 100)
-expect_error(estimateDispersionsFit(dds))
-dds <- estimateDispersionsGeneEst(dds)
-expect_message(estimateDispersionsFit(dds))
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  dds <- estimateSizeFactors(dds)
+  mcols(dds)$dispGeneEst <- rep(1e-7, 100)
+  dispersionFunction(dds) <- function(x) 1e-6
+  expect_warning(estimateDispersionsMAP(dds))
 
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-dds <- estimateSizeFactors(dds)
-mcols(dds)$dispGeneEst <- rep(1e-7, 100)
-dispersionFunction(dds) <- function(x) 1e-6
-expect_warning(estimateDispersionsMAP(dds))
-
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-dds <- estimateSizeFactors(dds)
-levels(dds$condition) <- c("A","B","C")
-expect_error(estimateDispersions(dds))
-dds$condition <- droplevels(dds$condition)
-dds$group <- dds$condition
-design(dds) <- ~ group + condition
-expect_error(estimateDispersions(dds))
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  dds <- estimateSizeFactors(dds)
+  levels(dds$condition) <- c("A","B","C")
+  expect_error(estimateDispersions(dds))
+  dds$condition <- droplevels(dds$condition)
+  dds$group <- dds$condition
+  design(dds) <- ~ group + condition
+  expect_error(estimateDispersions(dds))
+})
diff --git a/tests/testthat/test_edge_case.R b/tests/testthat/test_edge_case.R
index cdd93b8..0371f91 100644
--- a/tests/testthat/test_edge_case.R
+++ b/tests/testthat/test_edge_case.R
@@ -1,46 +1,49 @@
-# one row
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=1)
-sizeFactors(dds) <- rep(1,ncol(dds))
-dispersions(dds) <- .5
-dds <- nbinomWaldTest(dds)
-res <- results(dds)
-dds <- nbinomLRT(dds, reduced=~1)
-res <- results(dds)
-
-
-# only intercept
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100)
-design(dds) <- ~ 1
-dds <- DESeq(dds)
-res <- results(dds)
-
-
-# metadata insertion
-dds <- makeExampleDESeqDataSet(n=50,m=4)
-
-dds2 <- DESeqDataSetFromMatrix( counts(dds), colData(dds), design(dds) )
-mcols(dds2)$foo <- paste( "bar", 1:nrow(dds2) )
-dds2 <- DESeq(dds2)
-results(dds2)
-expect_true(class(mcols(mcols(dds2))$type) == "character")
-
-dds3 <- DESeqDataSetFromMatrix( counts(dds), DataFrame(row.names=colnames(dds)), ~ 1 )
-dds3$test <- 1:ncol(dds3)
-dds3 <- estimateSizeFactors(dds3)
-expect_true(class(mcols(colData(dds3))$type) == "character")
-
-
-# underscores
-dds <- makeExampleDESeqDataSet(n=50,m=4)
-levels(dds$condition) <- c("A_1","B_2")
-dds$exp_cond <- dds$condition
-design(dds) <- ~ exp_cond
-dds <- DESeq(dds)
-results(dds)
-
-# NA in colData
-dds <- makeExampleDESeqDataSet(n=50,m=4)
-colData(dds)$condition[4] <- NA
-expect_error(DESeq(dds))
+context("edge_case")
+test_that("edge cases work or throw proper errors", {
+  # one row
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=1)
+  sizeFactors(dds) <- rep(1,ncol(dds))
+  dispersions(dds) <- .5
+  dds <- nbinomWaldTest(dds)
+  res <- results(dds)
+  dds <- nbinomLRT(dds, reduced=~1)
+  res <- results(dds)
+
+
+  # only intercept
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100)
+  design(dds) <- ~ 1
+  expect_warning({dds <- DESeq(dds)})
+  res <- results(dds)
+
+
+  # metadata insertion
+  dds <- makeExampleDESeqDataSet(n=50,m=4)
+
+  dds2 <- DESeqDataSetFromMatrix( counts(dds), colData(dds), design(dds) )
+  mcols(dds2)$foo <- paste( "bar", 1:nrow(dds2) )
+  dds2 <- DESeq(dds2)
+  results(dds2)
+  expect_true(class(mcols(mcols(dds2))$type) == "character")
+
+  dds3 <- DESeqDataSetFromMatrix( counts(dds), DataFrame(row.names=colnames(dds)), ~ 1 )
+  dds3$test <- 1:ncol(dds3)
+  dds3 <- estimateSizeFactors(dds3)
+  expect_true(class(mcols(colData(dds3))$type) == "character")
+
+
+  # underscores
+  dds <- makeExampleDESeqDataSet(n=50,m=4)
+  levels(dds$condition) <- c("A_1","B_2")
+  dds$exp_cond <- dds$condition
+  design(dds) <- ~ exp_cond
+  dds <- DESeq(dds)
+  results(dds)
+
+  # NA in colData
+  dds <- makeExampleDESeqDataSet(n=50,m=4)
+  colData(dds)$condition[4] <- NA
+  expect_error(DESeq(dds))
+})
diff --git a/tests/testthat/test_factors.R b/tests/testthat/test_factors.R
index c9d2e32..ea63196 100644
--- a/tests/testthat/test_factors.R
+++ b/tests/testthat/test_factors.R
@@ -1,9 +1,12 @@
-dds <- makeExampleDESeqDataSet(n=100, m=6)
-levels(dds$condition) <- c("test-","test+")
-expect_error(DESeq(dds))
+context("factors")
+test_that("bad factor in design throw errors", {
+  dds <- makeExampleDESeqDataSet(n=100, m=6)
+  levels(dds$condition) <- c("test-","test+")
+  expect_error(DESeq(dds))
 
-dds <- makeExampleDESeqDataSet(n=100, m=6)
-dds$condition <- factor(rep(letters[1:3], each=2), ordered=TRUE)
-expect_error(DESeq(dds))
-mm <- model.matrix(~ condition, data=colData(dds))
-dds <- DESeq(dds, full=mm) # betaPrior=FALSE
+  dds <- makeExampleDESeqDataSet(n=100, m=6)
+  dds$condition <- factor(rep(letters[1:3], each=2), ordered=TRUE)
+  expect_error(DESeq(dds))
+  mm <- model.matrix(~ condition, data=colData(dds))
+  dds <- DESeq(dds, full=mm) # betaPrior=FALSE
+})
diff --git a/tests/testthat/test_fpkm.R b/tests/testthat/test_fpkm.R
index 4e6b5a3..016482f 100644
--- a/tests/testthat/test_fpkm.R
+++ b/tests/testthat/test_fpkm.R
@@ -1,5 +1,8 @@
-dds <- DESeqDataSetFromMatrix(matrix(c(1:4,2 * 1:4), ncol=2), DataFrame(x=1:2), ~ 1)
-rowRanges(dds) <- GRanges("1", IRanges(start=0:3 * 10 + 1, width=10))
-expect_equal(fpkm(dds)[1,1], 1e5 * 100, tolerance=.1) 
-expect_equal(fpm(dds)[1,1], 1e5, tolerance=.1)
-expect_equal(fpm(dds, robust=FALSE)[1,1], 1e5)
+context("fpkm")
+test_that("fpkm works as expected", {
+  dds <- DESeqDataSetFromMatrix(matrix(c(1:4,2 * 1:4), ncol=2), DataFrame(x=1:2), ~ 1)
+  rowRanges(dds) <- GRanges("1", IRanges(start=0:3 * 10 + 1, width=10))
+  expect_equal(fpkm(dds)[1,1], 1e5 * 100, tolerance=.1) 
+  expect_equal(fpm(dds)[1,1], 1e5, tolerance=.1)
+  expect_equal(fpm(dds, robust=FALSE)[1,1], 1e5)
+})
diff --git a/tests/testthat/test_frozen_transform.R b/tests/testthat/test_frozen_transform.R
index 93df7de..1f4b245 100644
--- a/tests/testthat/test_frozen_transform.R
+++ b/tests/testthat/test_frozen_transform.R
@@ -1,27 +1,28 @@
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100)
-design(dds) <- ~ 1
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
+context("frozen_transform")
+test_that("frozen transforms works", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100)
+  design(dds) <- ~ 1
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersions(dds)
 
-expect_warning(ddsNew <- makeExampleDESeqDataSet(m=1,n=100))
-counts(ddsNew)[,1] <- counts(dds)[,1]
-sizeFactors(ddsNew)[1] <- sizeFactors(dds)[1]
-
-# VST
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-dispersionFunction(ddsNew) <- dispersionFunction(dds)
-vsdNew <- varianceStabilizingTransformation(ddsNew, blind=FALSE)
-expect_equal(assay(vsd)[,1],assay(vsdNew)[,1],tolerance=1e-3)
-
-# rlog
-rld <- rlogTransformation(dds, blind=FALSE)  
-mcols(ddsNew)$dispFit <- mcols(dds)$dispFit
-betaPriorVar <- attr(rld,"betaPriorVar")
-intercept <- mcols(rld)$rlogIntercept
-rldNew <- rlogTransformation(ddsNew, blind=FALSE,
-                             betaPriorVar=betaPriorVar,
-                             intercept=intercept)
-expect_equal(assay(rld)[,1],assay(rldNew)[,1],tolerance=1e-3)
+  expect_warning(ddsNew <- makeExampleDESeqDataSet(m=1,n=100))
+  counts(ddsNew)[,1] <- counts(dds)[,1]
+  sizeFactors(ddsNew)[1] <- sizeFactors(dds)[1]
 
+  # VST
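+  # "freezing" here means the new single-sample dataset reuses the
+  # dispersion function learned on the original dataset, so the VST of
+  # shared counts should match up to small numerical differences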
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+  dispersionFunction(ddsNew) <- dispersionFunction(dds)
+  vsdNew <- varianceStabilizingTransformation(ddsNew, blind=FALSE)
+  expect_equal(assay(vsd)[,1],assay(vsdNew)[,1],tolerance=1e-3)
 
+  # rlog
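+  # for rlog the frozen quantities are the fitted dispersions (dispFit),
+  # the beta prior variance, and the per-gene intercepts from the
+  # original transformation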
+  rld <- rlogTransformation(dds, blind=FALSE)  
+  mcols(ddsNew)$dispFit <- mcols(dds)$dispFit
+  betaPriorVar <- attr(rld,"betaPriorVar")
+  intercept <- mcols(rld)$rlogIntercept
+  rldNew <- rlogTransformation(ddsNew, blind=FALSE,
+                               betaPriorVar=betaPriorVar,
+                               intercept=intercept)
+  expect_equal(assay(rld)[,1],assay(rldNew)[,1],tolerance=1e-3)
+})
diff --git a/tests/testthat/test_htseq.R b/tests/testthat/test_htseq.R
index 12fab65..aa2a0a2 100644
--- a/tests/testthat/test_htseq.R
+++ b/tests/testthat/test_htseq.R
@@ -1,7 +1,10 @@
-dir <- system.file(package="pasilla", "extdata")
-files <- grep("treated",list.files(dir),value=TRUE)
-sampleTable <- data.frame(id=seq_along(files), files,
-                          condition=factor(rep(c("t","u"),c(3,4))))
-setwd(dir)
-expect_error(DESeqDataSetFromHTSeqCount(sampleTable))
-dds <- DESeqDataSetFromHTSeqCount(sampleTable, design=~condition)
+context("htseq")
+test_that("htseq", {
+  dir <- system.file(package="pasilla", "extdata")
+  files <- grep("treated",list.files(dir),value=TRUE)
+  sampleTable <- data.frame(id=seq_along(files), files,
+                            condition=factor(rep(c("t","u"),c(3,4))))
+  setwd(dir)
+  expect_error(DESeqDataSetFromHTSeqCount(sampleTable))
+  dds <- DESeqDataSetFromHTSeqCount(sampleTable, design=~condition)
+})
diff --git a/tests/testthat/test_interactions.R b/tests/testthat/test_interactions.R
index 909907f..19d1f97 100644
--- a/tests/testthat/test_interactions.R
+++ b/tests/testthat/test_interactions.R
@@ -1,8 +1,10 @@
-dds <- makeExampleDESeqDataSet(n=100,m=8)
-colData(dds)$group <- factor(rep(c("X","Y"),times=ncol(dds)/2))
-design(dds) <- ~ condition + group + condition:group
-dds <- DESeq(dds)
-expect_equal(resultsNames(dds)[4], "conditionB.groupY")
-# interactions error
-expect_error(DESeq(dds, betaPrior=TRUE))
-
+context("interactions")
+test_that("interactions throw error", {
+  dds <- makeExampleDESeqDataSet(n=100,m=8)
+  colData(dds)$group <- factor(rep(c("X","Y"),times=ncol(dds)/2))
+  design(dds) <- ~ condition + group + condition:group
+  dds <- DESeq(dds)
+  expect_equal(resultsNames(dds)[4], "conditionB.groupY")
+  # interactions error
+  expect_error(DESeq(dds, betaPrior=TRUE))
+})
diff --git a/tests/testthat/test_lfcShrink.R b/tests/testthat/test_lfcShrink.R
new file mode 100644
index 0000000..61dbb12
--- /dev/null
+++ b/tests/testthat/test_lfcShrink.R
@@ -0,0 +1,16 @@
+context("lfcShrink")
+test_that("LFC shrinkage works", {
+  dds <- makeExampleDESeqDataSet(betaSD=1)
+  dds <- estimateSizeFactors(dds)
+  expect_error(lfcShrink(dds, 2, 1))
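+  # shrinkage needs dispersion estimates; the call above errors because
+  # only size factors have been computed at this point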
+  dds <- estimateDispersions(dds)
+  lfc <- lfcShrink(dds=dds, coef=2)
+  dds <- DESeq(dds, betaPrior=FALSE)
+  res <- results(dds)
+  res.shr <- lfcShrink(dds=dds, coef=2, res=res)
+  plotMA(res.shr)
+  res.shr <- lfcShrink(dds=dds,
+                       contrast=c("condition","B","A"),
+                       res=res)
+  plotMA(res.shr)
+})  
diff --git a/tests/testthat/test_linear_mu.R b/tests/testthat/test_linear_mu.R
index 9bf0bb9..d85f179 100644
--- a/tests/testthat/test_linear_mu.R
+++ b/tests/testthat/test_linear_mu.R
@@ -1,20 +1,23 @@
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100, m=4, interceptMean=10, interceptSD=3,
-                               dispMeanRel=function(x) 0.5, sizeFactors=c(.5,1,1,2))
-dds <- estimateSizeFactors(dds)
-dds1 <- estimateDispersionsGeneEst(dds, linearMu=FALSE)
-dds2 <- estimateDispersionsGeneEst(dds, linearMu=TRUE)
-mu1 <- assays(dds1)[["mu"]]
-mu2 <- assays(dds2)[["mu"]]
-par(mfrow=c(2,2),mar=c(3,3,1,1))
-for (i in 1:4) {
-  plot(mu1[,i], mu2[,i], xlab="", ylab="", log="xy")
-  abline(0,1)
-}
-cors <- diag(cor(mu1, mu2, use="complete"))
-expect_true(all(cors > 1 - 1e-6))
-#
-dds2 <- estimateDispersionsFit(dds2, fitType="mean")
-dds2 <- estimateDispersionsMAP(dds2)
-dds2 <- nbinomWaldTest(dds2)
-res <- results(dds2)
+context("linear_mu")
+test_that("the use of linear model for fitting mu works as expected", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100, m=4, interceptMean=10, interceptSD=3,
+                                 dispMeanRel=function(x) 0.5, sizeFactors=c(.5,1,1,2))
+  dds <- estimateSizeFactors(dds)
+  dds1 <- estimateDispersionsGeneEst(dds, linearMu=FALSE)
+  dds2 <- estimateDispersionsGeneEst(dds, linearMu=TRUE)
+  mu1 <- assays(dds1)[["mu"]]
+  mu2 <- assays(dds2)[["mu"]]
+  par(mfrow=c(2,2),mar=c(3,3,1,1))
+  for (i in 1:4) {
+    plot(mu1[,i], mu2[,i], xlab="", ylab="", log="xy")
+    abline(0,1)
+  }
+  cors <- diag(cor(mu1, mu2, use="complete"))
+  expect_true(all(cors > 1 - 1e-6))
+  #
+  dds2 <- estimateDispersionsFit(dds2, fitType="mean")
+  dds2 <- estimateDispersionsMAP(dds2)
+  dds2 <- nbinomWaldTest(dds2)
+  res <- results(dds2)
+})
diff --git a/tests/testthat/test_methods.R b/tests/testthat/test_methods.R
index d261e11..239a260 100644
--- a/tests/testthat/test_methods.R
+++ b/tests/testthat/test_methods.R
@@ -1,8 +1,11 @@
-coldata <- DataFrame(x=factor(c("A","A","B","B")))
-counts <- matrix(1:16, ncol=4)
-dds <- DESeqDataSetFromMatrix(counts, coldata, ~ x)
-expect_warning(counts(dds, replace=TRUE))
-expect_error(counts(dds, normalized=TRUE))
-expect_error(sizeFactors(dds) <- c(-1, -1, -1, -1))
-expect_error(normalizationFactors(dds) <- matrix(-1, ncol=4, nrow=4))
-expect_error(estimateDispersions(dds))
+context("methods")
+test_that("methods throw errors", {
+  coldata <- DataFrame(x=factor(c("A","A","B","B")))
+  counts <- matrix(1:16, ncol=4)
+  dds <- DESeqDataSetFromMatrix(counts, coldata, ~ x)
+  expect_warning(counts(dds, replace=TRUE))
+  expect_error(counts(dds, normalized=TRUE))
+  expect_error(sizeFactors(dds) <- c(-1, -1, -1, -1))
+  expect_error(normalizationFactors(dds) <- matrix(-1, ncol=4, nrow=4))
+  expect_error(estimateDispersions(dds))
+})
diff --git a/tests/testthat/test_model_matrix.R b/tests/testthat/test_model_matrix.R
index 29af080..ecb379a 100644
--- a/tests/testthat/test_model_matrix.R
+++ b/tests/testthat/test_model_matrix.R
@@ -1,28 +1,31 @@
-dds <- makeExampleDESeqDataSet(n=100, m=18)
-dds$group <- factor(rep(1:3,each=6))
-dds$condition <- factor(rep(rep(c("A","B","C"),each=2),3))
-# note: design is not used
-design(dds) <- ~ 1
-dds <- dds[,-c(17,18)]
+context("model_matrix")
+test_that("supplying custom model matrix works", {
+  dds <- makeExampleDESeqDataSet(n=100, m=18)
+  dds$group <- factor(rep(1:3,each=6))
+  dds$condition <- factor(rep(rep(c("A","B","C"),each=2),3))
+  # note: design is not used
+  design(dds) <- ~ 1
+  dds <- dds[,-c(17,18)]
 
-m1 <- model.matrix(~ group*condition, colData(dds))
-m1 <- m1[,-9]
-m0 <- model.matrix(~ group + condition, colData(dds))
+  m1 <- model.matrix(~ group*condition, colData(dds))
+  m1 <- m1[,-9]
+  m0 <- model.matrix(~ group + condition, colData(dds))
 
-dds <- DESeq(dds, full=m1, reduced=m0, test="LRT")
-results(dds)[1,]
-results(dds, name="group2.conditionC", test="Wald")[1,]
-dds <- removeResults(dds)
-dds <- DESeq(dds, full=m1, test="Wald", betaPrior=FALSE)
-results(dds)[1,]
+  dds <- DESeq(dds, full=m1, reduced=m0, test="LRT")
+  results(dds)[1,]
+  results(dds, name="group2.conditionC", test="Wald")[1,]
+  dds <- removeResults(dds)
+  dds <- DESeq(dds, full=m1, test="Wald", betaPrior=FALSE)
+  results(dds)[1,]
 
-# test better error than "error: inv(): matrix seems singular"
-coldata <- data.frame(group=factor(rep(1:3,each=6)),
-                      group2=factor(rep(1:3,each=6)),
-                      condition=factor(rep(1:6,3)))
-counts <- matrix(rpois(180, 100), ncol=18)
-m1 <- model.matrix(~ group + group2, coldata)
-m2 <- model.matrix(~ condition + group, coldata)
-dds <- DESeqDataSetFromMatrix(counts, coldata, ~group)
-expect_error(dds <- DESeq(dds, full=m1, fitType="mean"), "full rank")
-expect_error(dds <- DESeq(dds, full=m2, reduced=m1, test="LRT", fitType="mean"), "full rank")
+  # test better error than "error: inv(): matrix seems singular"
+  coldata <- data.frame(group=factor(rep(1:3,each=6)),
+                        group2=factor(rep(1:3,each=6)),
+                        condition=factor(rep(1:6,3)))
+  counts <- matrix(rpois(180, 100), ncol=18)
+  m1 <- model.matrix(~ group + group2, coldata)
+  m2 <- model.matrix(~ condition + group, coldata)
+  dds <- DESeqDataSetFromMatrix(counts, coldata, ~group)
+  expect_error(dds <- DESeq(dds, full=m1, fitType="mean"), "full rank")
+  expect_error(dds <- DESeq(dds, full=m2, reduced=m1, test="LRT", fitType="mean"), "full rank")
+})
diff --git a/tests/testthat/test_nbinomWald.R b/tests/testthat/test_nbinomWald.R
index 1578f0c..33ca300 100644
--- a/tests/testthat/test_nbinomWald.R
+++ b/tests/testthat/test_nbinomWald.R
@@ -1,22 +1,25 @@
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-expect_error(nbinomWaldTest(dds))
-expect_error(nbinomLRT(dds))
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
-mm <- model.matrix(~ condition, colData(dds))
-mm0 <- model.matrix(~ 1, colData(dds))
-expect_error(nbinomWaldTest(dds, betaPrior=TRUE, modelMatrix=mm))
-expect_error(nbinomLRT(dds, betaPrior=TRUE, full=mm, reduced=mm0))
-expect_error(nbinomWaldTest(dds, betaPrior=FALSE, modelMatrixType="expanded"))
-expect_error(nbinomLRT(dds, betaPrior=FALSE, modelMatrixType="expanded"))
-dds2 <- estimateMLEForBetaPriorVar(dds)
-estimateBetaPriorVar(dds2, betaPriorMethod="quantile")
-dds <- nbinomWaldTest(dds, modelMatrixType="standard")
-covarianceMatrix(dds, 1)
+context("nbinomWald")
+test_that("nbinomWald throws various errors and works with edge cases",{
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  expect_error(nbinomWaldTest(dds))
+  expect_error(nbinomLRT(dds))
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersions(dds)
+  mm <- model.matrix(~ condition, colData(dds))
+  mm0 <- model.matrix(~ 1, colData(dds))
+  expect_error(nbinomWaldTest(dds, betaPrior=TRUE, modelMatrix=mm))
+  expect_error(nbinomLRT(dds, betaPrior=TRUE, full=mm, reduced=mm0))
+  expect_error(nbinomWaldTest(dds, betaPrior=FALSE, modelMatrixType="expanded"))
+  expect_error(nbinomLRT(dds, betaPrior=FALSE, modelMatrixType="expanded"))
+  dds2 <- estimateMLEForBetaPriorVar(dds)
+  estimateBetaPriorVar(dds2, betaPriorMethod="quantile")
+  dds <- nbinomWaldTest(dds, modelMatrixType="standard")
+  covarianceMatrix(dds, 1)
 
-# try nbinom after no fitted dispersions
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersionsGeneEst(dds)
-dispersions(dds) <- mcols(dds)$dispGeneEst
-dds <- nbinomWaldTest(dds)
+  # try nbinom after no fitted dispersions
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersionsGeneEst(dds)
+  dispersions(dds) <- mcols(dds)$dispGeneEst
+  dds <- nbinomWaldTest(dds)
+})
diff --git a/tests/testthat/test_optim.R b/tests/testthat/test_optim.R
index bd94fc8..a939e2e 100644
--- a/tests/testthat/test_optim.R
+++ b/tests/testthat/test_optim.R
@@ -1,38 +1,40 @@
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100,interceptMean=10,interceptSD=3)
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
-# make a large predictor to test scaling
-colData(dds)$condition <- rnorm(ncol(dds),0,1000)
-modelMatrix <- model.matrix(~ condition, as.data.frame(colData(dds)))
-fit <- DESeq2:::fitNbinomGLMs(dds, modelMatrix=modelMatrix, 
-                              modelFormula = ~ condition,
-                              alpha_hat = dispersions(dds),
-                              lambda = c(2,2),
-                              renameCols=TRUE, betaTol=1e-8,
-                              maxit=100, useOptim=TRUE,
-                              useQR=TRUE, forceOptim=FALSE)
-fitOptim <- DESeq2:::fitNbinomGLMs(dds, modelMatrix=modelMatrix, 
-                                   modelFormula = ~ condition,
-                                   alpha_hat = dispersions(dds),
-                                   lambda = c(2,2),
-                                   renameCols=TRUE, betaTol=1e-8,
-                                   maxit=100, useOptim=TRUE,
-                                   useQR=TRUE, forceOptim=TRUE)
-#plot(fit$betaMatrix[,2], fitOptim$betaMatrix[,2])
-#abline(0,1,col="red")
-expect_equal(fit$betaMatrix, fitOptim$betaMatrix,tolerance=1e-6)
-expect_equal(fit$betaSE, fitOptim$betaSE,tolerance=1e-6)
-
-# test optim gives same lfcSE
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100, m=10)
-counts(dds)[1,] <- c(rep(0L,5),c(1000L,1000L,0L,0L,0L))
-dds <- DESeq(dds, betaPrior=FALSE)
-# beta iter = 100 implies optim used for fitting
-expect_equal(mcols(dds)$betaIter[1], 100)
-res1 <- results(dds, contrast=c("condition","B","A"))
-res2 <- results(dds, contrast=c(0,1))
-expect_true(all.equal(res1$lfcSE, res2$lfcSE))
-expect_true(all.equal(res1$pvalue, res2$pvalue))
+context("optim")
+test_that("optim gives same results", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100,interceptMean=10,interceptSD=3)
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersions(dds)
+  # make a large predictor to test scaling
+  colData(dds)$condition <- rnorm(ncol(dds),0,1000)
+  modelMatrix <- model.matrix(~ condition, as.data.frame(colData(dds)))
+  fit <- DESeq2:::fitNbinomGLMs(dds, modelMatrix=modelMatrix, 
+                                modelFormula = ~ condition,
+                                alpha_hat = dispersions(dds),
+                                lambda = c(2,2),
+                                renameCols=TRUE, betaTol=1e-8,
+                                maxit=100, useOptim=TRUE,
+                                useQR=TRUE, forceOptim=FALSE)
+  fitOptim <- DESeq2:::fitNbinomGLMs(dds, modelMatrix=modelMatrix, 
+                                     modelFormula = ~ condition,
+                                     alpha_hat = dispersions(dds),
+                                     lambda = c(2,2),
+                                     renameCols=TRUE, betaTol=1e-8,
+                                     maxit=100, useOptim=TRUE,
+                                     useQR=TRUE, forceOptim=TRUE)
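+  # the two fits should agree: forceOptim=TRUE routes every row through
+  # optim rather than the IRLS path, so betas and standard errors are
+  # compared between the two below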
+  #plot(fit$betaMatrix[,2], fitOptim$betaMatrix[,2])
+  #abline(0,1,col="red")
+  expect_equal(fit$betaMatrix, fitOptim$betaMatrix,tolerance=1e-6)
+  expect_equal(fit$betaSE, fitOptim$betaSE,tolerance=1e-6)
 
+  # test optim gives same lfcSE
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100, m=10)
+  counts(dds)[1,] <- c(rep(0L,5),c(1000L,1000L,0L,0L,0L))
+  dds <- DESeq(dds, betaPrior=FALSE)
+  # beta iter = 100 implies optim used for fitting
+  expect_equal(mcols(dds)$betaIter[1], 100)
+  res1 <- results(dds, contrast=c("condition","B","A"))
+  res2 <- results(dds, contrast=c(0,1))
+  expect_true(all.equal(res1$lfcSE, res2$lfcSE))
+  expect_true(all.equal(res1$pvalue, res2$pvalue))
+})
diff --git a/tests/testthat/test_outlier.R b/tests/testthat/test_outlier.R
index 4e1898d..c11eb00 100644
--- a/tests/testthat/test_outlier.R
+++ b/tests/testthat/test_outlier.R
@@ -1,71 +1,73 @@
-# test filtering and replacement
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100, m=12, dispMeanRel = function(x) 4/x + .5)
-counts(dds)[1,] <- rep(0L, 12)
-counts(dds)[2,] <- c(100000L, rep(10L, 11))
-counts(dds)[3,] <- c(100000L, rep(0L, 11))
-dds0 <- DESeq(dds, minReplicatesForReplace=Inf)
-dds1 <- DESeq(dds, minReplicatesForReplace=6)
-pval0 <- results(dds0)[1:3,"pvalue"]
-pval <- results(dds1)[1:3,"pvalue"]
-LFC0 <- results(dds0)[1:3,"log2FoldChange"]
-LFC <- results(dds1)[1:3,"log2FoldChange"]
+context("outlier")
+test_that("outlier filtering and replacement works as expected", {
+  # test filtering and replacement
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100, m=12, dispMeanRel = function(x) 4/x + .5)
+  counts(dds)[1,] <- rep(0L, 12)
+  counts(dds)[2,] <- c(100000L, rep(10L, 11))
+  counts(dds)[3,] <- c(100000L, rep(0L, 11))
+  dds0 <- DESeq(dds, minReplicatesForReplace=Inf)
+  dds1 <- DESeq(dds, minReplicatesForReplace=6)
+  pval0 <- results(dds0)[1:3,"pvalue"]
+  pval <- results(dds1)[1:3,"pvalue"]
+  LFC0 <- results(dds0)[1:3,"log2FoldChange"]
+  LFC <- results(dds1)[1:3,"log2FoldChange"]
 
-# filtered
-expect_true(all(is.na(pval0)))
-# not filtered
-expect_true(all(!is.na(pval[2:3])))
-# counts still the same
-expect_true(all(counts(dds1)==counts(dds)))
-# first is NA
-expect_true(is.na(LFC[1]))
-# replaced, reduced LFC
-expect_true(abs(LFC[2]) < abs(LFC0[2]))
-# replaced, LFC now zero
-expect_true(LFC[3] == 0)
-idx <- which(!mcols(dds1)$replace)
-# the pvalue for those not replaced is equal
-expect_equal(results(dds1)$pvalue[idx], results(dds0)$pvalue[idx])
+  # filtered
+  expect_true(all(is.na(pval0)))
+  # not filtered
+  expect_true(all(!is.na(pval[2:3])))
+  # counts still the same
+  expect_true(all(counts(dds1)==counts(dds)))
+  # first is NA
+  expect_true(is.na(LFC[1]))
+  # replaced, reduced LFC
+  expect_true(abs(LFC[2]) < abs(LFC0[2]))
+  # replaced, LFC now zero
+  expect_true(LFC[3] == 0)
+  idx <- which(!mcols(dds1)$replace)
+  # the pvalue for those not replaced is equal
+  expect_equal(results(dds1)$pvalue[idx], results(dds0)$pvalue[idx])
 
-
-# check that outlier filtering catches throughout range of mu
-beta0 <- seq(from=1,to=16,length=100)
-idx <- rep(rep(c(TRUE,FALSE),c(1,9)),10)
-set.seed(1)
-#par(mfrow=c(2,3))
-for (disp0 in c(.01,.5)) {
-  for (m in c(10,20,80)) {
-    dds <- makeExampleDESeqDataSet(n=100, m=m, interceptMean=beta0, interceptSD=0,
-                                   dispMeanRel=function(x) 4/x + disp0)
-    counts(dds)[idx,1] <- as.integer(1000 * 2^beta0[idx])
-    dds <- DESeq(dds, minReplicatesForReplace=Inf, quiet=TRUE)
-    res <- results(dds)
-    cutoff <- qf(.99, 2, m-2)
-    outlierCooks <- assays(dds)[["cooks"]][idx,1] > cutoff
-    maxOtherCooks <- apply(assays(dds)[["cooks"]][idx,-1], 1, max) < cutoff
-    expect_true(all(is.na(res$pvalue[idx])))
-    expect_true(all(outlierCooks))
-    expect_true(all(maxOtherCooks))
-    #col <- rep("black", 100)
-    #col[idx] <- ifelse(outlierCooks, ifelse(maxOtherCooks, "blue", "red"), "purple")
-    #plot(assays(dds)[["cooks"]][,1], col=col, log="y",
-    #     main=paste(m,"-",disp0), ylab="cooks");abline(h=qf(.99,2,m-2))
+  # check that outlier filtering catches throughout range of mu
+  beta0 <- seq(from=1,to=16,length=100)
+  idx <- rep(rep(c(TRUE,FALSE),c(1,9)),10)
+  set.seed(1)
+  par(mfrow=c(2,3))
+  for (disp0 in c(.01,.1)) {
+    for (m in c(10,20,80)) {
+      dds <- makeExampleDESeqDataSet(n=100, m=m, interceptMean=beta0, interceptSD=0,
+                                     dispMeanRel=function(x) disp0)
+      counts(dds)[idx,1] <- as.integer(1000 * 2^beta0[idx])
+      dds <- DESeq(dds, minReplicatesForReplace=Inf, quiet=TRUE, fitType="mean")
+      res <- results(dds)
+      cutoff <- qf(.99, 2, m-2)
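+      # Cook's distances are compared against the .99 quantile of F(p, m-p),
+      # with p=2 fitted coefficients here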
+      outlierCooks <- assays(dds)[["cooks"]][idx,1] > cutoff
+      nonoutlierCooks <- mcols(dds)$maxCooks[!idx] < cutoff
+      expect_true(all(is.na(res$pvalue[idx])))
+      expect_true(all(outlierCooks))
+      expect_true(all(nonoutlierCooks))
+      col <- rep("black", 100)
+      col[idx] <- "blue"
+      plot(2^beta0, mcols(dds)$maxCooks, col=col, log="xy",
+           main=paste(m,"-",disp0), xlab="mean", ylab="cooks")
+      abline(h=qf(.99,2,m-2))
+    }
   }
-}
 
-dds <- makeExampleDESeqDataSet(n=100)
-counts(dds)[1,1] <- 1000000L
-dds <- DESeq(dds, test="LRT", reduced=~1, minReplicatesForReplace=6)
-dds <- DESeq(dds, test="LRT", reduced=~1, betaPrior=TRUE, minReplicatesForReplace=6)
+  dds <- makeExampleDESeqDataSet(n=100)
+  counts(dds)[1,1] <- 1000000L
+  dds <- DESeq(dds, test="LRT", reduced=~1, minReplicatesForReplace=6)
 
-# test replace function
-dds <- makeExampleDESeqDataSet(n=100,m=4)
-expect_error(replaceOutliers(dds))
-dds <- DESeq(dds)
-expect_error(replaceOutliers(dds, minReplicates=2))
+  # test replace function
+  dds <- makeExampleDESeqDataSet(n=100,m=4)
+  expect_error(replaceOutliers(dds))
+  dds <- DESeq(dds)
+  expect_error(replaceOutliers(dds, minReplicates=2))
 
-# check model matrix standard bug
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100, m=20)
-counts(dds)[1,] <- c(100000L, rep(0L, 19))
-dds <- DESeq(dds, modelMatrixType="standard")
+  # check model matrix standard bug
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100, m=20)
+  counts(dds)[1,] <- c(100000L, rep(0L, 19))
+  dds <- DESeq(dds, modelMatrixType="standard")
+})
diff --git a/tests/testthat/test_parallel.R b/tests/testthat/test_parallel.R
index 08bb74f..63a7f4b 100644
--- a/tests/testthat/test_parallel.R
+++ b/tests/testthat/test_parallel.R
@@ -1,64 +1,67 @@
-dispMeanRel <- function(x) (4/x + .1) * exp(rnorm(length(x),0,sqrt(.5)))
-set.seed(1)
-dds0 <- makeExampleDESeqDataSet(n=100,dispMeanRel=dispMeanRel)
-counts(dds0)[51:60,] <- 0L
+context("parallel")
+test_that("parallel execution works as expected", {
+  dispMeanRel <- function(x) (4/x + .1) * exp(rnorm(length(x),0,sqrt(.5)))
+  set.seed(1)
+  dds0 <- makeExampleDESeqDataSet(n=100,dispMeanRel=dispMeanRel)
+  counts(dds0)[51:60,] <- 0L
 
-# the following is an example of a simple parallelizable DESeq() run
-# without outlier replacement. see DESeq2:::DESeqParallel for the code
-# which is actually used in DESeq()
+  # the following is an example of a simple parallelizable DESeq() run
+  # without outlier replacement. see DESeq2:::DESeqParallel for the code
+  # which is actually used in DESeq()
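+  # the pattern: size factors on the full object; gene-wise dispersion
+  # estimates per chunk; one global dispersion fit and prior variance;
+  # then per-chunk MAP dispersions, MLE betas, and Wald tests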
 
-nworkers <- 3
-idx <- factor(sort(rep(seq_len(nworkers),length=nrow(dds0))))
+  nworkers <- 3
+  idx <- factor(sort(rep(seq_len(nworkers),length=nrow(dds0))))
 
-### BEGINNING ###
+  ### BEGINNING ###
 
-dds <- estimateSizeFactors(dds0)
-dds <- do.call(rbind, lapply(levels(idx), function(l) {
-  estimateDispersionsGeneEst(dds[idx == l,,drop=FALSE])
-}))
-dds <- estimateDispersionsFit(dds)
-dispPriorVar <- estimateDispersionsPriorVar(dds)
-dds <- do.call(rbind, lapply(levels(idx), function(l) {
-  ddsSub <- estimateDispersionsMAP(dds[idx == l,,drop=FALSE], dispPriorVar=dispPriorVar)
-  estimateMLEForBetaPriorVar(ddsSub)
-}))
-betaPriorVar <- estimateBetaPriorVar(dds)
-dds <- do.call(rbind, lapply(levels(idx), function(l) {
-  nbinomWaldTest(dds[idx == l,,drop=FALSE], betaPriorVar=betaPriorVar)
-}))  
+  dds <- estimateSizeFactors(dds0)
+  dds <- do.call(rbind, lapply(levels(idx), function(l) {
+    estimateDispersionsGeneEst(dds[idx == l,,drop=FALSE])
+  }))
+  dds <- estimateDispersionsFit(dds)
+  dispPriorVar <- estimateDispersionsPriorVar(dds)
+  dds <- do.call(rbind, lapply(levels(idx), function(l) {
+    ddsSub <- estimateDispersionsMAP(dds[idx == l,,drop=FALSE], dispPriorVar=dispPriorVar)
+    estimateMLEForBetaPriorVar(ddsSub)
+  }))
+  betaPriorVar <- estimateBetaPriorVar(dds)
+  dds <- do.call(rbind, lapply(levels(idx), function(l) {
+    nbinomWaldTest(dds[idx == l,,drop=FALSE], betaPrior=TRUE, betaPriorVar=betaPriorVar)
+  }))  
 
-### END ###
+  ### END ###
 
-res1 <- results(dds)
+  res1 <- results(dds)
 
-dds2 <- DESeq(dds0)
-res2 <- results(dds2)
+  dds2 <- DESeq(dds0, betaPrior=TRUE, minRep=Inf)
+  res2 <- results(dds2)
 
-expect_equal(mcols(dds)$dispGeneEst, mcols(dds2)$dispGeneEst)
-expect_equal(mcols(dds)$dispFit, mcols(dds2)$dispFit)
-expect_equal(mcols(dds)$dispMAP, mcols(dds2)$dispMAP)
-expect_equal(mcols(dds)$dispersion, mcols(dds2)$dispersion)
-expect_equal(attr(dispersionFunction(dds), "dispPriorVar"),
-             attr(dispersionFunction(dds2), "dispPriorVar"))
-expect_equal(attr(dispersionFunction(dds), "varLogDispEsts"),
-             attr(dispersionFunction(dds2), "varLogDispEsts"))
-expect_equal(mcols(dds)$MLE_condition_B_vs_A,
-             mcols(dds2)$MLE_condition_B_vs_A)
-expect_equal(attr(dds, "betaPriorVar"),
-             attr(dds2, "betaPriorVar"))
-expect_equal(mcols(dds)$conditionB, mcols(dds2)$conditionB)  
-expect_equal(res1$log2FoldChange, res2$log2FoldChange)
-expect_equal(res1$pvalue, res2$pvalue)
+  expect_equal(mcols(dds)$dispGeneEst, mcols(dds2)$dispGeneEst)
+  expect_equal(mcols(dds)$dispFit, mcols(dds2)$dispFit)
+  expect_equal(mcols(dds)$dispMAP, mcols(dds2)$dispMAP)
+  expect_equal(mcols(dds)$dispersion, mcols(dds2)$dispersion)
+  expect_equal(attr(dispersionFunction(dds), "dispPriorVar"),
+               attr(dispersionFunction(dds2), "dispPriorVar"))
+  expect_equal(attr(dispersionFunction(dds), "varLogDispEsts"),
+               attr(dispersionFunction(dds2), "varLogDispEsts"))
+  expect_equal(mcols(dds)$MLE_condition_B_vs_A,
+               mcols(dds2)$MLE_condition_B_vs_A)
+  expect_equal(attr(dds, "betaPriorVar"),
+               attr(dds2, "betaPriorVar"))
+  expect_equal(mcols(dds)$conditionB, mcols(dds2)$conditionB)  
+  expect_equal(res1$log2FoldChange, res2$log2FoldChange)
+  expect_equal(res1$pvalue, res2$pvalue)
 
-library("BiocParallel")
-register(SerialParam())
-dds3 <- DESeq(dds0, parallel=TRUE)
-res3 <- results(dds3, parallel=TRUE)
-res4 <- results(dds3)
-expect_equal(res2$pvalue, res3$pvalue)
-expect_equal(res3$pvalue, res4$pvalue)  
-
-dds <- makeExampleDESeqDataSet(n=100,m=8)
-dds <- DESeq(dds, parallel=TRUE, test="LRT", reduced=~1)
-dds <- DESeq(dds, parallel=TRUE, test="LRT", reduced=~1, betaPrior=TRUE)
-dds <- DESeq(dds, parallel=TRUE, betaPrior=FALSE)
+  library("BiocParallel")
+  register(SerialParam())
+  dds3 <- DESeq(dds0, betaPrior=TRUE, parallel=TRUE)
+  res3 <- results(dds3, parallel=TRUE)
+  res4 <- results(dds3)
+  expect_equal(res2$pvalue, res3$pvalue)
+  expect_equal(res3$pvalue, res4$pvalue)  
+  expect_equal(res2$log2FoldChange, res3$log2FoldChange)
+  expect_equal(res3$log2FoldChange, res4$log2FoldChange)  
+  
+  dds <- makeExampleDESeqDataSet(n=100,m=8)
+  dds <- DESeq(dds, parallel=TRUE, test="LRT", reduced=~1)
+})
diff --git a/tests/testthat/test_plots.R b/tests/testthat/test_plots.R
index 4b92d70..f99162f 100644
--- a/tests/testthat/test_plots.R
+++ b/tests/testthat/test_plots.R
@@ -1,25 +1,30 @@
-# test plots
-dds <- makeExampleDESeqDataSet(n=100,m=8)
-dds$group <- factor(rep(c(1,2,1,2),each=2))
-dds <- DESeq(dds)
-res <- results(dds)
-plotDispEsts(dds)
-plotMA(dds)
-plotMA(dds, ylim=c(-1,1))
-plotCounts(dds, 1)
-plotCounts(dds, 1, intgroup=c("condition","group"))
-plotCounts(dds, 1, transform=TRUE)
-expect_error(plotCounts(dds, 1, intgroup="foo"))
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-plotPCA(vsd)
-dat <- plotPCA(vsd, returnData=TRUE)
-plotPCA(vsd, intgroup=c("condition","group"))
-expect_error(plotPCA(vsd, intgroup="foo"))
-plotSparsity(dds)
+context("plots")
+test_that("plots work", {
+  # test plots
+  dds <- makeExampleDESeqDataSet(n=100,m=8)
+  dds$group <- factor(rep(c(1,2,1,2),each=2))
+  dds <- DESeq(dds)
+  res <- results(dds)
+  plotDispEsts(dds)
+  plotDispEsts(dds, CV=TRUE)
+  plotMA(dds)
+  plotMA(dds, ylim=c(-1,1))
+  plotCounts(dds, 1)
+  plotCounts(dds, 1, intgroup=c("condition","group"))
+  plotCounts(dds, 1, transform=TRUE)
+  expect_error(plotCounts(dds, 1, intgroup="foo"))
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+  plotPCA(vsd)
+  dat <- plotPCA(vsd, returnData=TRUE)
+  plotPCA(vsd, intgroup=c("condition","group"))
+  expect_error(plotPCA(vsd, intgroup="foo"))
+  plotSparsity(dds)
 
-# plotMA MLE
-res <- results(dds)
-expect_error(plotMA(res, MLE=TRUE))
-res <- results(dds, addMLE=TRUE)
-plotMA(res, MLE=TRUE)
-dev.off()
+  # plotMA MLE
+  dds <- DESeq(dds, betaPrior=TRUE)
+  res <- results(dds)
+  expect_error(plotMA(res, MLE=TRUE))
+  res <- results(dds, addMLE=TRUE)
+  plotMA(res, MLE=TRUE)
+  dev.off()
+})
diff --git a/tests/testthat/test_results.R b/tests/testthat/test_results.R
index 6fa1507..4b61a90 100644
--- a/tests/testthat/test_results.R
+++ b/tests/testthat/test_results.R
@@ -1,153 +1,157 @@
-## test contrasts
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=200,m=12)
-dds$condition <- factor(rep(1:3,each=4))
-dds$group <- factor(rep(1:2,length=ncol(dds)))
-counts(dds)[1,] <- rep(c(100L,200L,800L),each=4)
-
-design(dds) <- ~ group + condition
-
-# calling results too early
-expect_error(results(dds))
-
-dds <- DESeq(dds)
-head(coef(dds))
-res <- results(dds)
-show.res <- capture.output(show(res))
-summary.res <- summary(res)
-
-# various results error checking
-expect_error(results(dds, test="LRT"))
-expect_error(results(dds, altHypothesis="lessAbs"))
-expect_error(results(dds, name=c("Intercept","group1")))
-expect_error(results(dds, contrast=c("foo","B","A")))
-expect_error(results(dds, contrast=c("condition","4","1")))
-expect_error(results(dds, test="foo"))
-expect_error(results(dds, contrast=FALSE))
-expect_error(results(dds, contrast=letters[1:4]))
-expect_error(results(dds, contrast=c("condition","1","1")))
-results(dds, independentFiltering=FALSE)
-results(dds, contrast=list("condition1"))
-expect_error(results(dds, contrast=list("condition1","condition2","condition3")))
-expect_error(results(dds, contrast=list("condition1",1)))
-expect_error(results(dds, contrast=list("condition1","foo")))
-expect_error(results(dds, contrast=list("condition1","condition1")))
-expect_error(results(dds, contrast=list(character(), character())))
-expect_error(results(dds, contrast=rep(0, 6)))
-
-# check to see if the contrasts with expanded model matrix
-# are close to expected (although shrunk due to the beta prior)
-lfc31 <- results(dds,contrast=c("condition","3","1"))[1,2]
-lfc21 <- results(dds,contrast=c("condition","2","1"))[1,2]
-lfc32 <- results(dds,contrast=c("condition","3","2"))[1,2]
-expect_equal(lfc31, 3, tolerance=.1)
-expect_equal(lfc21, 1, tolerance=.1)
-expect_equal(lfc32, 2, tolerance=.1)
-expect_equal(results(dds,contrast=c("condition","1","3"))[1,2], -3, tolerance=.1)
-expect_equal(results(dds,contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
-expect_equal(results(dds,contrast=c("condition","2","3"))[1,2], -2, tolerance=.1)
-
-# check that results are not changed by releveling
-dds2 <- dds
-colData(dds2)$condition <- relevel(colData(dds2)$condition, "2")
-dds2 <- DESeq(dds2)
-expect_equal(results(dds2,contrast=c("condition","3","1"))[1,2], lfc31, tolerance=1e-6)
-expect_equal(results(dds2,contrast=c("condition","2","1"))[1,2], lfc21, tolerance=1e-6)
-expect_equal(results(dds2,contrast=c("condition","3","2"))[1,2], lfc32, tolerance=1e-6)
-
-# test a number of contrast as list options
-expect_equal(results(dds, contrast=list("condition3","condition1"))[1,2], lfc31, tolerance=1e-6)
-results(dds, contrast=list("condition3","condition1"), listValues=c(.5,-.5))
-results(dds, contrast=list("condition3",character()))
-results(dds, contrast=list("condition3",character()), listValues=c(.5,-.5))
-results(dds, contrast=list(character(),"condition1"))
-results(dds, contrast=list(character(),"condition1"), listValues=c(.5,-.5))
-
-# test no prior on intercept
-expect_equivalent(attr(dds,"betaPriorVar")[1], 1e6)  
-
-# test thresholding
-results(dds, lfcThreshold=1)
-expect_error(results(dds, lfcThreshold=1, altHypothesis="lessAbs"))
-results(dds, lfcThreshold=1, altHypothesis="greater")
-results(dds, lfcThreshold=1, altHypothesis="less")
-
-ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
-results(ddsNoPrior, lfcThreshold=1, altHypothesis="lessAbs")
-
-##################################################
-## test designs with zero intercept
-
-# test some special cases for results()
-# using designs with +0 
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100,m=12)
-dds$condition <- factor(rep(1:3,each=4))
-dds$group <- factor(rep(1:2,length=ncol(dds)))
-
-counts(dds)[1,] <- rep(c(100L,200L,400L),each=4)
-
-design(dds) <- ~ condition + 0
-dds <- DESeq(dds, betaPrior=FALSE)
-
-expect_equal(results(dds)[1,2], 2, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","2","1"))[1,2], 1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","3","2"))[1,2], 1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","1","3"))[1,2], -2, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","2","3"))[1,2], -1, tolerance=.1)
-expect_error(results(dds, contrast=c("condition","4","1")))
-
-design(dds) <- ~ group + condition + 0
-dds <- DESeq(dds, betaPrior=FALSE)
-
-expect_equal(results(dds)[1,2], 2, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","2","1"))[1,2], 1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","3","2"))[1,2], 1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","1","3"))[1,2], -2, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
-expect_equal(results(dds, contrast=c("condition","2","3"))[1,2], -1, tolerance=.1)
-
-###############################################
-## test likelihood ratio test
-set.seed(1)
-dds <- makeExampleDESeqDataSet(n=100)
-dds$group <- factor(rep(1:2,6))
-design(dds) <- ~ group + condition
-dds <- DESeq(dds, test="LRT", reduced=~group)
-
-expect_true(!all(results(dds,name="condition_B_vs_A")$stat ==
-                   results(dds,name="condition_B_vs_A",test="Wald")$stat))
-
-# LFC are already MLE
-expect_error(results(dds, addMLE=TRUE))
-expect_error(results(dds, lfcThreshold=1, test="LRT"))
-
-expect_true(all(results(dds, test="LRT", contrast=c("group","1","2"))$log2FoldChange ==
-                -1 * results(dds, test="LRT", contrast=c("group","2","1"))$log2FoldChange))
-
-###############################################
-## test results basics
-dds <- makeExampleDESeqDataSet(n=100)
-dds <- DESeq(dds)
-res <- results(dds, format="GRanges")
-expect_warning(results(dds, format="GRangesList"))
-
-rowRanges(dds) <- as(rowRanges(dds), "GRangesList")
-dds <- DESeq(dds)
-expect_message(results(dds, format="GRanges"))
-
-# check tidy-ness
-res <- results(dds, tidy=TRUE)
-expect_true(colnames(res)[1] == "row")
-expect_true(is(res, "data.frame"))
-
-# test MLE and 'name'
-results(dds, addMLE=TRUE)
-expect_error(results(dds, name="condition_B_vs_A", addMLE=TRUE))
-
-# test remove results
-dds <- removeResults(dds)
-expect_true(!any(mcols(mcols(dds))$type == "results"))
-
-
+context("results")
+test_that("results works as expected and throws errors", {
+  ## test contrasts
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=200,m=12)
+  dds$condition <- factor(rep(1:3,each=4))
+  dds$group <- factor(rep(1:2,length=ncol(dds)))
+  counts(dds)[1,] <- rep(c(100L,200L,800L),each=4)
+
+  design(dds) <- ~ group + condition
+
+  # calling results too early
+  expect_error(results(dds))
+
+  sizeFactors(dds) <- rep(1, ncol(dds))
+  dds <- DESeq(dds)
+  head(coef(dds))
+  res <- results(dds)
+  show.res <- capture.output(show(res))
+  summary.res <- capture.output(summary(res))
+
+  # various results error checking
+  expect_error(results(dds, test="LRT"))
+  expect_error(results(dds, altHypothesis="lessAbs"))
+  expect_error(results(dds, name=c("Intercept","group1")))
+  expect_error(results(dds, contrast=c("foo","B","A")))
+  expect_error(results(dds, contrast=c("condition","4","1")))
+  expect_error(results(dds, test="foo"))
+  expect_error(results(dds, contrast=FALSE))
+  expect_error(results(dds, contrast=letters[1:4]))
+  expect_error(results(dds, contrast=c("condition","1","1")))
+  results(dds, independentFiltering=FALSE)
+  results(dds, contrast=list("condition_2_vs_1"))
+  expect_error(results(dds, contrast=list("condition_2_vs_1","condition_3_vs_1","condition_3_vs_1")))
+  expect_error(results(dds, contrast=list("condition_2_vs_1",1)))
+  expect_error(results(dds, contrast=list("condition_2_vs_1","foo")))
+  expect_error(results(dds, contrast=list("condition_2_vs_1","condition_2_vs_1")))
+  expect_error(results(dds, contrast=list(character(), character())))
+  expect_error(results(dds, contrast=rep(0, 6)))
+
+  # check to see if the contrasts with expanded model matrix
+  # are close to expected (although shrunk due to the beta prior).
+  # lfcShrink() here calls results()
+  lfc31 <- lfcShrink(dds,contrast=c("condition","3","1"))[1]
+  lfc21 <- lfcShrink(dds,contrast=c("condition","2","1"))[1]
+  lfc32 <- lfcShrink(dds,contrast=c("condition","3","2"))[1]
+  expect_equal(lfc31, 3, tolerance=.1)
+  expect_equal(lfc21, 1, tolerance=.1)
+  expect_equal(lfc32, 2, tolerance=.1)
+  expect_equal(results(dds,contrast=c("condition","1","3"))[1,2], -3, tolerance=.1)
+  expect_equal(results(dds,contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
+  expect_equal(results(dds,contrast=c("condition","2","3"))[1,2], -2, tolerance=.1)
+
+  # check that results are not changed by releveling
+  dds2 <- dds
+  colData(dds2)$condition <- relevel(colData(dds2)$condition, "2")
+  dds2 <- DESeq(dds2)
+  expect_equal(lfcShrink(dds2,contrast=c("condition","3","1"))[1], lfc31, tolerance=1e-6)
+  expect_equal(lfcShrink(dds2,contrast=c("condition","2","1"))[1], lfc21, tolerance=1e-6)
+  expect_equal(lfcShrink(dds2,contrast=c("condition","3","2"))[1], lfc32, tolerance=1e-6)
+
+  # test a number of contrast as list options
+  expect_equal(results(dds, contrast=list("condition_3_vs_1","condition_2_vs_1"))[1,2],
+               2, tolerance=1e-6)
+  results(dds, contrast=list("condition_3_vs_1","condition_2_vs_1"), listValues=c(.5,-.5))
+  results(dds, contrast=list("condition_3_vs_1",character()))
+  results(dds, contrast=list("condition_3_vs_1",character()), listValues=c(.5,-.5))
+  results(dds, contrast=list(character(),"condition_2_vs_1"))
+  results(dds, contrast=list(character(),"condition_2_vs_1"), listValues=c(.5,-.5))
+
+  # test no prior on intercept
+  expect_equivalent(attr(dds,"betaPriorVar"), rep(1e6, 4))
+
+  # test thresholding
+  results(dds, lfcThreshold=1)
+  results(dds, lfcThreshold=1, altHypothesis="lessAbs")
+  results(dds, lfcThreshold=1, altHypothesis="greater")
+  results(dds, lfcThreshold=1, altHypothesis="less")
+
+  dds3 <- DESeq(dds, betaPrior=TRUE)
+  expect_error(results(dds3, lfcThreshold=1, altHypothesis="lessAbs"))
+})
+
+test_that("results: designs with zero intercept", {
+  # test some special cases for results()
+  # using designs with +0 
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100,m=12)
+  dds$condition <- factor(rep(1:3,each=4))
+  dds$group <- factor(rep(1:2,length=ncol(dds)))
+
+  counts(dds)[1,] <- rep(c(100L,200L,400L),each=4)
+
+  design(dds) <- ~ condition + 0
+  dds <- DESeq(dds, betaPrior=FALSE)
+
+  expect_equal(results(dds)[1,2], 2, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","2","1"))[1,2], 1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","3","2"))[1,2], 1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","1","3"))[1,2], -2, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","2","3"))[1,2], -1, tolerance=.1)
+  expect_error(results(dds, contrast=c("condition","4","1")))
+
+  design(dds) <- ~ group + condition + 0
+  dds <- DESeq(dds, betaPrior=FALSE)
+
+  expect_equal(results(dds)[1,2], 2, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","2","1"))[1,2], 1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","3","2"))[1,2], 1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","1","3"))[1,2], -2, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","1","2"))[1,2], -1, tolerance=.1)
+  expect_equal(results(dds, contrast=c("condition","2","3"))[1,2], -1, tolerance=.1)
+})
+
+test_that("results: likelihood ratio test", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=100)
+  dds$group <- factor(rep(1:2,6))
+  design(dds) <- ~ group + condition
+  dds <- DESeq(dds, test="LRT", reduced=~group)
+
+  expect_true(!all(results(dds,name="condition_B_vs_A")$stat ==
+              results(dds,name="condition_B_vs_A",test="Wald")$stat))
+
+  # LFC are already MLE
+  expect_error(results(dds, addMLE=TRUE))
+  expect_error(results(dds, lfcThreshold=1, test="LRT"))
+
+  expect_true(all(results(dds, test="LRT", contrast=c("group","1","2"))$log2FoldChange ==
+              -1 * results(dds, test="LRT", contrast=c("group","2","1"))$log2FoldChange))
+})
+
+test_that("results basics regarding format, tidy, MLE, remove are working", {
+  dds <- makeExampleDESeqDataSet(n=100)
+  dds <- DESeq(dds)
+  res <- results(dds, format="GRanges")
+  expect_warning(results(dds, format="GRangesList"))
+
+  rowRanges(dds) <- as(rowRanges(dds), "GRangesList")
+  dds <- DESeq(dds)
+  expect_message(results(dds, format="GRanges"))
+
+  # check tidy-ness
+  res <- results(dds, tidy=TRUE)
+  expect_true(colnames(res)[1] == "row")
+  expect_true(is(res, "data.frame"))
+
+  # test MLE and 'name'
+  dds2 <- DESeq(dds, betaPrior=TRUE)
+  results(dds2, addMLE=TRUE)
+  expect_error(results(dds, name="condition_B_vs_A", addMLE=TRUE))
+
+  # test remove results
+  dds <- removeResults(dds)
+  expect_true(!any(mcols(mcols(dds))$type == "results"))
+})
diff --git a/tests/testthat/test_rlog.R b/tests/testthat/test_rlog.R
index 8356ab5..70872c9 100644
--- a/tests/testthat/test_rlog.R
+++ b/tests/testthat/test_rlog.R
@@ -1,26 +1,29 @@
-# expect warning on sparsity and large counts
-dds <- makeExampleDESeqDataSet(n=100, m=20)
-idx <- sample(ncol(dds), nrow(dds)/2, TRUE)
-counts(dds)[cbind(1:(nrow(dds)/2), idx)] <- 10000L
-mcols(dds)$dispFit <- .5
-expect_warning({ rld <- rlog(dds, blind=FALSE) })
+context("rlog")
+test_that("rlog works", {
+  # expect warning on sparsity and large counts
+  dds <- makeExampleDESeqDataSet(n=100, m=20)
+  idx <- sample(ncol(dds), nrow(dds)/2, TRUE)
+  counts(dds)[cbind(1:(nrow(dds)/2), idx)] <- 10000L
+  mcols(dds)$dispFit <- .5
+  expect_warning({ rld <- rlog(dds, blind=FALSE) })
 
-# test rlog basics/errors
-dds <- makeExampleDESeqDataSet(n=20, m=4)
-colnames(dds) <- NULL
-rlog(dds)
-head(rlog(assay(dds)))
-expect_error(rlog(dds, intercept=rep(1,10)))
+  # test rlog basics/errors
+  dds <- makeExampleDESeqDataSet(n=20, m=4)
+  colnames(dds) <- NULL
+  rlog(dds)
+  head(rlog(assay(dds)))
+  expect_error(rlog(dds, intercept=rep(1,10)))
 
-mcols(dds)$dispFit <- rep(.5, 20)
-rlog(dds, blind=FALSE, intercept=rep(1,20))
+  mcols(dds)$dispFit <- rep(.5, 20)
+  rlog(dds, blind=FALSE, intercept=rep(1,20))
 
-expect_error(rlogData(dds))
-expect_error(rlogData(dds, intercept=rep(1,10)))
+  expect_error(rlogData(dds))
+  expect_error(rlogData(dds, intercept=rep(1,10)))
 
-# test normTranform
-dds <- makeExampleDESeqDataSet(n=50, m=10)
-nt <- normTransform(dds)
-plotPCA(nt)
+  # test normTransform
+  dds <- makeExampleDESeqDataSet(n=50, m=10)
+  nt <- normTransform(dds)
+  plotPCA(nt)
 
-rld <- rlog(counts(dds))
+  rld <- rlog(counts(dds))
+})
diff --git a/tests/testthat/test_size_factor.R b/tests/testthat/test_size_factor.R
index 64f2566..27cc406 100644
--- a/tests/testthat/test_size_factor.R
+++ b/tests/testthat/test_size_factor.R
@@ -1,23 +1,47 @@
-# size factor tests
-m <- matrix(1:16, ncol=4)
-expect_error(estimateSizeFactorsForMatrix(m, geoMeans=1:5))
-expect_error(estimateSizeFactorsForMatrix(m, geoMeans=rep(0,4)))
-expect_error(estimateSizeFactorsForMatrix(m, controlGenes="foo"))
-estimateSizeFactorsForMatrix(m, geoMeans=1:4)
-estimateSizeFactorsForMatrix(m, controlGenes=1:2)
+context("size_factor")
+test_that("size factor works", {
+  
+  # size factor error checking
+  m <- matrix(1:16, ncol=4)
+  expect_error(estimateSizeFactorsForMatrix(m, geoMeans=1:5))
+  expect_error(estimateSizeFactorsForMatrix(m, geoMeans=rep(0,4)))
+  expect_error(estimateSizeFactorsForMatrix(m, controlGenes="foo"))
+  estimateSizeFactorsForMatrix(m, geoMeans=1:4)
+  estimateSizeFactorsForMatrix(m, controlGenes=1:2)
 
-# iterate method
-set.seed(1)
-true.sf <- 2^(rep(c(-2,-1,0,0,1,2),each=2))
-dds <- makeExampleDESeqDataSet(sizeFactors=true.sf, n=100)
-cts <- counts(dds)
-idx <- cbind(seq_len(nrow(cts)), sample(ncol(dds), nrow(cts), replace=TRUE))
-cts[idx] <- 0L
-cts[1,1] <- 1000000L # an outlier
-counts(dds) <- cts
-dds <- estimateSizeFactors(dds, type="iterate")
-sf <- sizeFactors(dds)
-coefs <- coef(lm(sf ~ true.sf))
-expect_true(abs(coefs[1]) < .1)
-expect_true(abs(coefs[2] - 1) < .1)
+  # norm matrix works
+  nm <- m / exp(rowMeans(log(m))) # divide out the geometric mean
+  true.sf <- c(2,1,1,.5)
+  counts <- sweep(2*m, 2, true.sf, "*")
+  dds <- DESeqDataSetFromMatrix(counts, data.frame(x=1:4), ~1)
+  dds <- estimateSizeFactors(dds, normMatrix=nm)
+  expect_equal((normalizationFactors(dds)/nm)[1,], true.sf)
+  
+  # make some counts with zeros
+  set.seed(1)
+  true.sf <- 2^(rep(c(-2,-1,0,0,1,2),each=2))
+  dmr <- function(x) 0.01
+  dds <- makeExampleDESeqDataSet(sizeFactors=true.sf, n=100, dispMeanRel=dmr)
+  cts <- counts(dds)
+  idx <- cbind(seq_len(nrow(cts)), sample(ncol(dds), nrow(cts), replace=TRUE))
+  cts[idx] <- 0L
+  cts[1,1] <- 1000000L # an outlier
+  counts(dds) <- cts
 
+  # positive counts method
+  dds <- estimateSizeFactors(dds, type="poscounts")
+  sf <- sizeFactors(dds)
+  plot(true.sf, sf);abline(0,1)
+  coefs <- coef(lm(sf ~ true.sf))
+  expect_true(abs(coefs[1]) < .1)
+  expect_true(abs(coefs[2] - 1) < .1)
+  
+  # iterate method
+  dds <- estimateSizeFactors(dds, type="iterate")
+  sf <- sizeFactors(dds)
+  plot(true.sf, sf);abline(0,1)
+  coefs <- coef(lm(sf ~ true.sf))
+  expect_true(abs(coefs[1]) < .1)
+  expect_true(abs(coefs[2] - 1) < .1)
+
+})
diff --git a/tests/testthat/test_tximport.R b/tests/testthat/test_tximport.R
index fc1e973..47e589c 100644
--- a/tests/testthat/test_tximport.R
+++ b/tests/testthat/test_tximport.R
@@ -1,22 +1,28 @@
-library("tximport")
-library("tximportData")
-library("readr")
-dir <- system.file("extdata", package="tximportData")
-samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
-files <- file.path(dir,"salmon", samples$run, "quant.sf")
-names(files) <- paste0("sample",1:6)
-tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
-txi <- tximport(files, type="salmon", tx2gene=tx2gene, reader=read_tsv)
-dds <- DESeqDataSetFromTximport(txi, samples, ~1)
+context("tximport")
+test_that("tximport works", {
+  library("tximport")
+  library("tximportData")
+  library("readr")
+  dir <- system.file("extdata", package="tximportData")
+  samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
+  files <- file.path(dir,"salmon", samples$run, "quant.sf")
+  names(files) <- paste0("sample",1:6)
+  tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
+  
+  txi <- tximport(files, type="salmon", tx2gene=tx2gene)
 
-# test fpkm
+  dds <- DESeqDataSetFromTximport(txi, samples, ~1)
 
-exprs <- fpm(dds)
-exprs <- fpkm(dds)
-
-# test length of 0
-
-txi2 <- txi
-txi2$length[1,1] <- 0
-expect_error(dds2 <- DESeqDataSetFromTximport(txi2, samples, ~1), "lengths")
+  # test library size correction taking into account
+  # the average transcript lengths
+  dds <- estimateSizeFactors(dds)
+  
+  # test fpkm
+  exprs <- fpm(dds)
+  exprs <- fpkm(dds)
 
+  # test length of 0
+  txi2 <- txi
+  txi2$length[1,1] <- 0
+  expect_error(dds2 <- DESeqDataSetFromTximport(txi2, samples, ~1), "lengths")
+})
diff --git a/tests/testthat/test_unmix_samples.R b/tests/testthat/test_unmix_samples.R
new file mode 100644
index 0000000..e22be78
--- /dev/null
+++ b/tests/testthat/test_unmix_samples.R
@@ -0,0 +1,51 @@
+context("unmix samples")
+test_that("unmixing samples works", {
+
+  set.seed(1)
+  n <- 2000
+  a <- runif(n)
+  b <- runif(n)
+  c <- runif(n)
+  counts <- matrix(nrow=n, ncol=8)
+  disp <- 0.01
+
+  counts[,1] <- rnbinom(n, mu=1e4 * a, size=1/disp)
+  counts[,2] <- rnbinom(n, mu=1e4 * b, size=1/disp)
+  counts[,3] <- rnbinom(n, mu=1e4 * c, size=1/disp)
+  counts[,4] <- rnbinom(n, mu=1e4 * (.75*a + .25*b), size=1/disp)
+  counts[,5] <- rnbinom(n, mu=1e4 * (.5*a + .5*b), size=1/disp)
+  counts[,6] <- rnbinom(n, mu=1e4 * (.25*a + .75*b), size=1/disp)
+  counts[,7] <- rnbinom(n, mu=1e4 * (.33*a + .33*b + .33*c), size=1/disp)
+  counts[,8] <- rnbinom(n, mu=1e4 * (.25*a + .25*b + .5*c), size=1/disp)
+  coldata <- data.frame(a=c(1,0,0,.75,.5,.25,.33,.25),
+                        b=c(0,1,0,.25,.5,.75,.33,.25),
+                        c=c(0,0,1,  0,  0, 0,.33,.5))
+
+  dds <- DESeqDataSetFromMatrix(counts, coldata, ~1)
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersions(dds, fitType="mean")
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+  #library(ggplot2)
+  #plotPCA(vsd, intgroup=c("a","b","c")) + geom_point(size=5)
+
+  pure <- matrix(rnbinom(3*n,mu=1e4*c(a,b,c),size=1/disp),ncol=3)
+  colnames(pure) <- c("a","b","c")
+  
+  x <- counts
+  
+  alpha <- attr(dispersionFunction(dds),"mean")
+
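+  # estimate each sample's mixing proportions from the pure profiles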
+  mix <- unmix(counts, pure=pure, alpha=alpha, quiet=TRUE)
+
+  max(abs(dds$a - mix[,1]))
+  max(abs(dds$b - mix[,2]))
+  max(abs(dds$c - mix[,3]))
+  
+  expect_lt(max(abs(dds$a - mix[,1])), .01)
+  expect_lt(max(abs(dds$b - mix[,2])), .01)
+  expect_lt(max(abs(dds$c - mix[,3])), .01)
+
+  # test the shifted log (designed for TPMs)
+  mix2 <- unmix(counts, pure=pure, shift=0.5, quiet=TRUE)
+  
+})
diff --git a/tests/testthat/test_vst.R b/tests/testthat/test_vst.R
index d5e4286..a511d9a 100644
--- a/tests/testthat/test_vst.R
+++ b/tests/testthat/test_vst.R
@@ -1,31 +1,34 @@
-dds <- makeExampleDESeqDataSet(n=100, m=4)
-design(dds) <- ~ 1
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersionsGeneEst(dds)
-dds <- estimateDispersionsFit(dds, fitType="parametric")
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-dds <- estimateDispersionsFit(dds, fitType="local")
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-dds <- estimateDispersionsFit(dds, fitType="mean")
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)  
+context("vst")
+test_that("vst works", {
+  dds <- makeExampleDESeqDataSet(n=100, m=4)
+  design(dds) <- ~ 1
+  dds <- estimateSizeFactors(dds)
+  dds <- estimateDispersionsGeneEst(dds)
+  dds <- estimateDispersionsFit(dds, fitType="parametric")
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+  dds <- estimateDispersionsFit(dds, fitType="local")
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+  dds <- estimateDispersionsFit(dds, fitType="mean")
+  vsd <- varianceStabilizingTransformation(dds, blind=FALSE)  
 
-# test VST basics/errors
-dds <- makeExampleDESeqDataSet(n=20, m=4)
-colnames(dds) <- NULL
-varianceStabilizingTransformation(dds)
-head(varianceStabilizingTransformation(assay(dds)))
-expect_error(getVarianceStabilizedData(dds))
+  # test VST basics/errors
+  dds <- makeExampleDESeqDataSet(n=20, m=4)
+  colnames(dds) <- NULL
+  varianceStabilizingTransformation(dds)
+  head(varianceStabilizingTransformation(assay(dds)))
+  expect_error(getVarianceStabilizedData(dds))
 
-# test just matrix
-vsd <- varianceStabilizingTransformation(counts(dds))
+  # test just matrix
+  vsd <- varianceStabilizingTransformation(counts(dds))
 
-# test fast VST based on subsampling
-dds <- makeExampleDESeqDataSet(n=20000, m=10)
-vsd <- vst(dds)
-vsd <- vst(counts(dds))
+  # test fast VST based on subsampling
+  dds <- makeExampleDESeqDataSet(n=20000, m=10)
+  vsd <- vst(dds)
+  vsd <- vst(counts(dds))
 
-# test VST and normalization factors
-dds <- makeExampleDESeqDataSet(n=100, m=10, betaSD=1.5)
-nf <- matrix(exp(rnorm(1000,0,.2)),ncol=10)
-normalizationFactors(dds) <- nf
-vsd <- varianceStabilizingTransformation(dds, fitType="local")
+  # test VST and normalization factors
+  dds <- makeExampleDESeqDataSet(n=100, m=10, betaSD=1.5)
+  nf <- matrix(exp(rnorm(1000,0,.2)),ncol=10)
+  normalizationFactors(dds) <- nf
+  vsd <- varianceStabilizingTransformation(dds, fitType="local")
+})
diff --git a/tests/testthat/test_weights.R b/tests/testthat/test_weights.R
new file mode 100644
index 0000000..96aa6c4
--- /dev/null
+++ b/tests/testthat/test_weights.R
@@ -0,0 +1,91 @@
+context("weights")
+test_that("weights work", {
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=10)
+  dds <- DESeq(dds, quiet=TRUE)
+  dds2 <- dds
+  w <- matrix(1, nrow=nrow(dds), ncol=12)
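+  # zero weight for sample 1 on gene 1: the fit should match dropping sample 1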
+  w[1,1] <- 0
+  assays(dds2)[["weights"]] <- w
+  dds2 <- nbinomWaldTest(dds2)
+  dds3 <- dds[,-1]
+  dds3 <- nbinomWaldTest(dds3)
+  
+  expect_equal(results(dds2)$log2FoldChange[1], results(dds3)$log2FoldChange[1])
+  expect_equal(results(dds2)$lfcSE[1], results(dds3)$lfcSE[1])
+  expect_equal(mcols(dds2)[1,"deviance"],mcols(dds3)[1,"deviance"])
+  
+  nf <- matrix(sizeFactors(dds),nrow=nrow(dds),ncol=ncol(dds),byrow=TRUE)
+  
+  o <- fitNbinomGLMsOptim(object=dds,
+                          modelMatrix=model.matrix(design(dds), colData(dds)),
+                          lambda=rep(1e-6, 2),
+                          rowsForOptim=1,
+                          rowStable=TRUE,
+                          normalizationFactors=nf,
+                          alpha_hat=dispersions(dds),
+                          weights=w,
+                          useWeights=TRUE,
+                          betaMatrix=matrix(0,nrow=nrow(dds),ncol=2),
+                          betaSE=matrix(0,nrow=nrow(dds),ncol=2),
+                          betaConv=rep(FALSE,nrow(dds)),
+                          beta_mat=matrix(0,nrow=nrow(dds),ncol=2),
+                          mu=matrix(0,nrow=nrow(dds),ncol=ncol(dds)),
+                          logLike=rep(0,nrow(dds)))
+
+  expect_equal(results(dds3)$log2FoldChange[1], o$betaMatrix[1,2], tolerance=1e-4)
+
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=10)
+  w <- matrix(1, nrow=nrow(dds), ncol=12)
+  w[1,1] <- 0
+  assays(dds)[["weights"]] <- w
+  dds <- DESeq(dds, betaPrior=TRUE, quiet=TRUE)
+  
+  design(dds) <- ~1
+  suppressWarnings({ dds <- DESeq(dds, quiet=TRUE) })
+  dds2 <- dds
+  assays(dds2)[["weights"]] <- w
+  dds2 <- nbinomWaldTest(dds2)
+  dds3 <- dds[,-1]
+  dds3 <- nbinomWaldTest(dds3)
+
+  expect_equal(results(dds2)$log2FoldChange[1], results(dds3)$log2FoldChange[1])
+  expect_equal(results(dds2)$lfcSE[1], results(dds3)$lfcSE[1])
+  expect_equal(mcols(dds2)[1,"deviance"],mcols(dds3)[1,"deviance"])
+
+  set.seed(1)
+  dds <- makeExampleDESeqDataSet(n=10)
+  counts(dds)[1,1] <- 100L
+  sizeFactors(dds) <- rep(1,12)
+  dds <- estimateDispersions(dds)
+  dds2 <- dds
+  w <- matrix(1, nrow=nrow(dds), ncol=12)
+  w[1,1] <- 0
+  assays(dds2)[["weights"]] <- w
+  dds2 <- estimateDispersions(dds2)
+  dds3 <- dds[,-1]
+  dds3 <- estimateDispersions(dds3)
+  
+  expect_equal(mcols(dds2)[1,"dispGeneEst"],mcols(dds3)[1,"dispGeneEst"],tolerance=1e-3)
+  # MAP estimates won't be equal because of different dispersion prior widths...
+  expect_true(mcols(dds)[1,"dispMAP"] > mcols(dds2)[1,"dispMAP"])
+
+  # test grid of weights
+  ## set.seed(1)
+  ## dds <- makeExampleDESeqDataSet(n=10, dispMeanRel=function(x) 0.01)
+  ## counts(dds)[1,1] <- 100L
+  ## sizeFactors(dds) <- rep(1,12)
+  ## dds <- DESeq(dds, quiet=TRUE, fitType="mean")
+  ## dds2 <- dds
+  ## w <- matrix(1, nrow=nrow(dds), ncol=12)
+  ## lfc <- sapply(1:11, function(i) {
+  ##   w[1,1] <- (i-1)/10
+  ##   assays(dds2)[["weights"]] <- w
+  ##   dds2 <- DESeq(dds2, quiet=TRUE, fitType="mean")
+  ##   results(dds2)$log2FoldChange[1]
+  ## })
+  ## plot((1:11-1)/10, lfc, type="b")
+  ## abline(h=results(dds)$log2FoldChange[1])
+  
+})
diff --git a/tests/testthat/test_zero_zero.R b/tests/testthat/test_zero_zero.R
deleted file mode 100644
index fd3e0c0..0000000
--- a/tests/testthat/test_zero_zero.R
+++ /dev/null
@@ -1,18 +0,0 @@
-# test comparison of two groups with all zeros
-dds <- makeExampleDESeqDataSet(m=8, n=100, sizeFactors=c(1,1,.5,.5,1,1,2,2))
-dds$condition <- factor(rep(c("A","B","C","D"),each=2))
-counts(dds)[1,] <- c(100L,110L,0L,0L,100L,110L,0L,0L)
-counts(dds)[2,] <- rep(0L, 8)
-dds <- DESeq(dds)
-res <- results(dds, contrast=c("condition","D","B"))[1,]
-expect_equal(res$log2FoldChange, 0)
-res <- results(dds, contrast=c(0,0,-1,0,1))[1,]
-expect_equal(res$log2FoldChange, 0)
-res <- results(dds,c(0,0,0,0,1))[1,]
-expect_true(res$log2FoldChange != 0)
-# if all samples have 0, should be NA
-res <- results(dds, contrast=c("condition","D","B"))[2,]
-expect_true(is.na(res$log2FoldChange))
-res <- results(dds, contrast=c(0,0,-1,0,1))[2,]
-expect_true(is.na(res$log2FoldChange))
-
diff --git a/vignettes/DESeq2.Rmd b/vignettes/DESeq2.Rmd
new file mode 100644
index 0000000..2969fc5
--- /dev/null
+++ b/vignettes/DESeq2.Rmd
@@ -0,0 +1,2420 @@
+---
+title: "Analyzing RNA-seq data with DESeq2"
+author: "Michael I. Love, Simon Anders, and Wolfgang Huber"
+date: "`r BiocStyle::doc_date()`"
+package: "`r BiocStyle::pkg_ver('DESeq2')`"
+abstract: >
+  A basic task in the analysis of count data from RNA-seq is the
+  detection of differentially expressed genes. The count data are
+  presented as a table which reports, for each sample, the number of
+  sequence fragments that have been assigned to each gene. Analogous
+  data also arise for other assay types, including comparative ChIP-Seq,
+  HiC, shRNA screening, and mass spectrometry.  An important analysis
+  question is the quantification and statistical inference of systematic
+  changes between conditions, as compared to within-condition
+  variability. The package DESeq2 provides methods to test for
+  differential expression by use of negative binomial generalized linear
+  models; the estimates of dispersion and logarithmic fold changes
+  incorporate data-driven prior distributions. This vignette explains the
+  use of the package and demonstrates typical workflows.
+  [An RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+  on the Bioconductor website covers similar material to this vignette
+  but at a slower pace, including the generation of count matrices from
+  FASTQ files.
+  DESeq2 package version: `r packageVersion("DESeq2")`
+output:
+  rmarkdown::html_document:
+    highlight: pygments
+    toc: true
+    fig_width: 5
+bibliography: library.bib
+vignette: >
+  %\VignetteIndexEntry{Analyzing RNA-seq data with DESeq2}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding[utf8]{inputenc}
+---
+
+
+<!-- This is the source document -->
+
+
+```{r setup, echo=FALSE, results="hide"}
+knitr::opts_chunk$set(tidy=FALSE, cache=TRUE,
+                      dev="png",
+                      message=FALSE, error=FALSE, warning=TRUE)
+```	
+
+# Standard workflow
+
+**If you use DESeq2 in published research, please cite:**
+
+> Love, M.I., Huber, W., Anders, S.,
+> Moderated estimation of fold change and dispersion for RNA-seq data with DESeq2, 
+> *Genome Biology* 2014, **15**:550.
+> [10.1186/s13059-014-0550-8](http://dx.doi.org/10.1186/s13059-014-0550-8)
+
+Other Bioconductor packages with similar aims are
+[edgeR](http://bioconductor.org/packages/edgeR),
+[limma](http://bioconductor.org/packages/limma),
+[DSS](http://bioconductor.org/packages/DSS),
+[EBSeq](http://bioconductor.org/packages/EBSeq), and 
+[baySeq](http://bioconductor.org/packages/baySeq).
+
+## Quick start
+
+Here we show the most basic steps for a differential expression
+analysis. There are a variety of steps upstream of DESeq2 that result
+in the generation of counts or estimated counts for each sample, which
+we will discuss in the sections below. This code chunk assumes that
+you have a count matrix called `cts` and a table of sample
+information called `coldata`.  The `design` indicates how to model the
+samples, here, that we want to measure the effect of the condition,
+controlling for batch differences. The two factor variables `batch`
+and `condition` should be columns of `coldata`.
+
+```{r quickStart, eval=FALSE}
+dds <- DESeqDataSetFromMatrix(countData = cts,
+                              colData = coldata,
+                              design= ~ batch + condition)
+dds <- DESeq(dds)
+res <- results(dds, contrast=c("condition","treated","control"))
+```
+
+The following starting functions will be explained below:
+
+* If you have transcript quantification files, as produced by
+  *Salmon*, *Sailfish*, or *kallisto*, you would use
+  *DESeqDataSetFromTximport*.
+* If you have *htseq-count* files, the first line would use
+  *DESeqDataSetFromHTSeqCount*.
+* If you have a *RangedSummarizedExperiment*, the first line would use 
+  *DESeqDataSet*.
+
+## How to get help for DESeq2
+
+Any and all DESeq2 questions should be posted to the 
+**Bioconductor support site**, which serves as a searchable knowledge
+base of questions and answers:
+
+<https://support.bioconductor.org>
+
+Posting a question and tagging with "DESeq2" will automatically send
+an alert to the package authors to respond on the support site.  See
+the first question in the list of [Frequently Asked Questions](#FAQ)
+(FAQ) for information about how to construct an informative post. 
+
+You should **not** email your question to the package authors, as we will
+just reply that the question should be posted to the 
+**Bioconductor support site**.
+
+## Input data
+
+### Why un-normalized counts?
+
+As input, the DESeq2 package expects count data as obtained, e.g.,
+from RNA-seq or another high-throughput sequencing experiment, in the form of a
+matrix of integer values. The value in the *i*-th row and the *j*-th column of
+the matrix tells how many reads can be assigned to gene *i* in sample *j*.
+Analogously, for other types of assays, the rows of the matrix might correspond
+e.g. to binding regions (with ChIP-Seq) or peptide sequences (with
+quantitative mass spectrometry). We will list methods for obtaining
+count matrices in the sections below.
+
+The values in the matrix should be un-normalized counts or estimated
+counts of sequencing reads (for
+single-end RNA-seq) or fragments (for paired-end RNA-seq). 
+The [RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+describes multiple techniques for preparing such count matrices.  It
+is important to provide raw count matrices as input, since these are
+required for DESeq2's statistical model [@Love2014] to hold: only the
+count values allow the measurement precision to be assessed
+correctly. The DESeq2 model
+internally corrects for library size, so transformed or normalized
+values such as counts scaled by library size should not be used as
+input.
+
+### The DESeqDataSet
+
+The object class used by the DESeq2 package to store the read counts 
+and the intermediate estimated quantities during statistical analysis
+is the *DESeqDataSet*, which will usually be represented in the code
+here as an object `dds`.
+
+A technical detail is that the *DESeqDataSet* class extends the
+*RangedSummarizedExperiment* class of the 
+[SummarizedExperiment](http://bioconductor.org/packages/SummarizedExperiment) package. 
+The "Ranged" part refers to the fact that the rows of the assay data 
+(here, the counts) can be associated with genomic ranges (the exons of genes).
+This association facilitates downstream exploration of results, making use of
+other Bioconductor packages' range-based functionality
+(e.g. find the closest ChIP-seq peaks to the differentially expressed genes).
+
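+For instance, the per-gene ranges can be inspected with the
+*rowRanges* accessor (a minimal sketch, assuming ranges were attached
+when the object was constructed):
+
+```{r rowRangesSketch, eval=FALSE}
+rowRanges(dds) # GRanges (or GRangesList), one entry per gene
+```
+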
+A *DESeqDataSet* object must have an associated *design formula*.
+The design formula expresses the variables which will be
+used in modeling. The formula should be a tilde (~) followed by the
+variables with plus signs between them (it will be coerced into a
+*formula* if it is not already). The design can be changed later;
+however, all differential analysis steps should then be repeated,
+as the design formula is used to estimate both the dispersions and
+the log2 fold changes of the model.
+
+*Note*: In order to benefit from the default settings of the
+package, you should put the variable of interest at the end of the
+formula and make sure the control level is the first level.
+
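+As a minimal sketch of the syntax (hypothetical `batch` and
+`condition` columns, for illustration only; the variable of interest
+goes last):
+
+```{r designSketch, eval=FALSE}
+design(dds) <- ~ batch + condition # coerced to a formula
+```
+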
+We will now show 4 ways of constructing a *DESeqDataSet*, depending
+on what pipeline was used upstream of DESeq2 to generate counts or
+estimated counts:
+
+1) From [transcript abundance files and tximport](#tximport)
+2) From a [count matrix](#countmat)
+3) From [htseq-count files](#htseq)
+4) From a [SummarizedExperiment](#se) object
+
+<a name="tximport"/>
+
+### Transcript abundance files and *tximport* input
+
+A newer and recommended pipeline is to use fast transcript 
+abundance quantifiers upstream of DESeq2, and then to create
+gene-level count matrices for use with DESeq2 
+by importing the quantification data using the
+[tximport](http://bioconductor.org/packages/tximport) 
+package. This workflow allows users to import transcript abundance estimates
+from a variety of external software, including the following methods:
+
+* [Salmon](http://combine-lab.github.io/salmon/)
+  [@Patro2016Salmon]
+* [Sailfish](http://www.cs.cmu.edu/~ckingsf/software/sailfish/)
+  [@Patro2014Sailfish]
+* [kallisto](https://pachterlab.github.io/kallisto/about.html)
+  [@Bray2016Near]
+* [RSEM](http://deweylab.github.io/RSEM/)
+  [@Li2011RSEM]
+
+Some advantages of using the above methods for transcript abundance
+estimation are: 
+(i) this approach corrects for potential changes in gene length across samples 
+(e.g. from differential isoform usage) [@Trapnell2013Differential],
+(ii) some of these methods (*Salmon*, *Sailfish*, *kallisto*) 
+are substantially faster and require less memory
+and disk usage compared to alignment-based methods that require
+creation and storage of BAM files, and
+(iii) it is possible to avoid discarding those fragments that can
+align to multiple genes with homologous sequence, thus increasing
+sensitivity [@Robert2015Errors].
+
+Full details on the motivation and methods for importing transcript
+level abundance and count estimates, summarizing to gene-level count matrices 
+and producing an offset which corrects for potential changes in average
+transcript length across samples are described in [@Soneson2015].
+Note that the tximport-to-DESeq2 approach uses *estimated* gene
+counts from the transcript abundance quantifiers, but not *normalized*
+counts.
+
+Here, we demonstrate how to import transcript abundances
+and construct a gene-level *DESeqDataSet* object
+from *Salmon* `quant.sf` files, which are
+stored in the [tximportData](http://bioconductor.org/packages/tximportData) package.
+You do not need the `tximportData` package for your own analysis; it
+is used here only for demonstration.
+
+Note that, instead of locating `dir` using *system.file*,
+a user would typically just provide a path, e.g. `/path/to/quant/files`.
+For a typical use, the `condition` information should already be
+present as a column of the sample table `samples`, while here we
+construct artificial condition labels for demonstration.
+
+
+```{r txiSetup}
+library("tximport")
+library("readr")
+library("tximportData")
+dir <- system.file("extdata", package="tximportData")
+samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
+samples$condition <- factor(rep(c("A","B"),each=3))
+rownames(samples) <- samples$run
+samples[,c("pop","center","run","condition")]
+```
+
+Next we specify the path to the files using the appropriate columns of
+`samples`, and we read in a table that links transcripts to genes for
+this dataset.
+
+```{r txiFiles}
+files <- file.path(dir,"salmon", samples$run, "quant.sf")
+names(files) <- samples$run
+tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
+```
+
+We import the necessary quantification data for DESeq2 using the
+*tximport* function.  For further details on use of *tximport*,
+including the construction of the `tx2gene` table for linking
+transcripts to genes in your dataset, please refer to the 
+[tximport](http://bioconductor.org/packages/tximport) package vignette.
+
+```{r tximport, results="hide"}
+txi <- tximport(files, type="salmon", tx2gene=tx2gene)
+```
+
+Finally, we can construct a *DESeqDataSet* from the `txi` object and
+sample information in `samples`.
+
+```{r txi2dds, results="hide"}
+library("DESeq2")
+ddsTxi <- DESeqDataSetFromTximport(txi,
+                                   colData = samples,
+                                   design = ~ condition)
+```
+
+The `ddsTxi` object here can then be used as `dds` in the
+following analysis steps.
+
+<a name="countmat"/>
+
+### Count matrix input
+
+Alternatively, the function *DESeqDataSetFromMatrix* can be
+used if you already have a matrix of read counts prepared from another
+source. Another method for quickly producing count matrices 
+from alignment files is the *featureCounts* function [@Liao2013feature]
+in the [Rsubread](http://bioconductor.org/packages/Rsubread) package.
+To use *DESeqDataSetFromMatrix*, the user should provide 
+the counts matrix, the information about the samples (the columns of the 
+count matrix) as a *DataFrame* or *data.frame*, and the design formula.
+
+To demonstrate the use of *DESeqDataSetFromMatrix*,
+we will read in count data from the
+[pasilla](http://bioconductor.org/packages/pasilla) package. 
+We read in a count matrix, which we will name `cts`, 
+and the sample information table, which we will name `coldata`. 
+Further below we describe how to extract these objects from,
+e.g. *featureCounts* output. 
+
+```{r loadPasilla}
+library("pasilla")
+pasCts <- system.file("extdata",
+                      "pasilla_gene_counts.tsv",
+                      package="pasilla", mustWork=TRUE)
+pasAnno <- system.file("extdata",
+                       "pasilla_sample_annotation.csv",
+                       package="pasilla", mustWork=TRUE)
+cts <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
+coldata <- read.csv(pasAnno, row.names=1)
+coldata <- coldata[,c("condition","type")]
+```
+
+We examine the count matrix and column data to see if they are consistent:
+
+```{r showPasilla}
+head(cts)
+head(coldata)
+```
+
+Note that these are not in the same order with respect to samples! 
+
+It is critical that the columns of the count matrix and the rows of
+the column data (information about samples) are in the same order.
+We should re-arrange one or the other so that they are consistent in
+terms of sample order (if we do not, later functions would produce
+an error). We additionally need to chop off the `"fb"` of the 
+row names of `coldata`, so the naming is consistent.
+
+```{r reorderPasila}
+rownames(coldata) <- sub("fb","",rownames(coldata))
+all(rownames(coldata) %in% colnames(cts))
+cts <- cts[, rownames(coldata)]
+all(rownames(coldata) == colnames(cts))
+```
+
+If you have used the *featureCounts* function [@Liao2013feature] in the 
+[Rsubread](http://bioconductor.org/packages/Rsubread) package, the matrix of read counts can be directly 
+provided from the `"counts"` element in the list output.
+The count matrix and column data can typically be read into R 
+from flat files using base R functions such as *read.csv*
+or *read.delim*. For *htseq-count* files, see the dedicated input
+function below. 
+
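+As a sketch of that route (assuming `fc` holds the list returned by
+*featureCounts*; the name `fc` is hypothetical):
+
+```{r featureCountsSketch, eval=FALSE}
+cts <- fc$counts # matrix of read counts, genes x samples
+```
+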
+With the count matrix, `cts`, and the sample
+information, `coldata`, we can construct a *DESeqDataSet*:
+
+```{r matrixInput}
+library("DESeq2")
+dds <- DESeqDataSetFromMatrix(countData = cts,
+                              colData = coldata,
+                              design = ~ condition)
+dds
+```
+
+If you have additional feature data, it can be added to the
+*DESeqDataSet* by adding to the metadata columns of a newly
+constructed object. (Here we add redundant data just for demonstration, as
+the gene names are already the rownames of the `dds`.)
+
+```{r addFeatureData}
+featureData <- data.frame(gene=rownames(cts))
+mcols(dds) <- DataFrame(mcols(dds), featureData)
+mcols(dds)
+```
+
+<a name="htseq"/>
+
+### *htseq-count* input
+
+You can use the function *DESeqDataSetFromHTSeqCount* if you
+have used *htseq-count* from the 
+[HTSeq](http://www-huber.embl.de/users/anders/HTSeq) 
+python package [@Anders:2014:htseq].
+For an example of using the python scripts, see the
+[pasilla](http://bioconductor.org/packages/pasilla) data package. First you will want to specify a
+variable which points to the directory in which the *htseq-count*
+output files are located. 
+
+```{r htseqDirI, eval=FALSE}
+directory <- "/path/to/your/files/"
+```
+
+However, for demonstration purposes only, the following line of
+code points to the directory for the demo *htseq-count* output
+files packaged with the [pasilla](http://bioconductor.org/packages/pasilla) package.
+
+```{r htseqDirII}
+directory <- system.file("extdata", package="pasilla",
+                         mustWork=TRUE)
+```
+
+We specify which files to read in using *list.files*,
+and select those files which contain the string `"treated"`
+using *grep*. The *sub* function is used to 
+chop up the sample filename to obtain the condition status, or 
+you might alternatively read in a phenotypic table 
+using *read.table*.
+
+```{r htseqInput}
+sampleFiles <- grep("treated",list.files(directory),value=TRUE)
+sampleCondition <- sub("(.*treated).*","\\1",sampleFiles)
+sampleTable <- data.frame(sampleName = sampleFiles,
+                          fileName = sampleFiles,
+                          condition = sampleCondition)
+```
+
+Then we build the *DESeqDataSet* using the following function:
+
+```{r hsteqDds}
+library("DESeq2")
+ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
+                                       directory = directory,
+                                       design= ~ condition)
+ddsHTSeq
+```
+
+<a name="se"/>
+
+### *SummarizedExperiment* input
+
+An example of the steps to produce a *RangedSummarizedExperiment* can
+be found in the [RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene/)
+and in the vignette for the data package [airway](http://bioconductor.org/packages/airway).
+Here we load the *RangedSummarizedExperiment* from that package in
+order to build a *DESeqDataSet*.
+
+```{r loadSumExp}
+library("airway")
+data("airway")
+se <- airway
+```
+The constructor function below shows the generation of a
+*DESeqDataSet* from a *RangedSummarizedExperiment* `se`.
+
+```{r sumExpInput}
+library("DESeq2")
+ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
+ddsSE
+```
+
+### Pre-filtering
+
+While it is not necessary to pre-filter low count genes before running the DESeq2
+functions, there are two reasons which make pre-filtering useful:
+by removing rows in which there are no reads or nearly no reads,
+we reduce the memory size of the `dds` data object and 
+we increase the speed of the transformation
+and testing functions within DESeq2. Here we perform a minimal
+pre-filtering to remove rows that have only 0 or 1 read. Note that more strict
+filtering to increase power is *automatically* 
+applied via [independent filtering](#indfilt) on the mean of
+normalized counts within the *results* function. 
+
+```{r prefilter}
+dds <- dds[ rowSums(counts(dds)) > 1, ]
+``` 
+
+### Note on factor levels 
+
+By default, R will choose a *reference level* for factors based on
+alphabetical order. Then, if you never tell the DESeq2 functions which
+level you want to compare against (e.g. which level represents the
+control group), the comparisons will be based on the alphabetical
+order of the levels. There are two solutions: you can either
+explicitly tell *results* which comparison to make using the
+`contrast` argument (this will be shown later), or you can explicitly
+set the factor levels. Setting the factor levels can be done in two
+ways, either using *factor*:
+
+```{r factorlvl}
+dds$condition <- factor(dds$condition, levels=c("untreated","treated"))
+``` 
+
+...or using *relevel*, just specifying the reference level:
+
+```{r relevel}
+dds$condition <- relevel(dds$condition, ref="untreated")
+``` 
+
+If you need to subset the columns of a *DESeqDataSet*,
+i.e., when removing certain samples from the analysis, it is possible
+that all the samples for one or more levels of a variable in the design
+formula would be removed. In this case, the *droplevels* function can be used
+to remove those levels which do not have samples in the current *DESeqDataSet*:
+
+```{r droplevels}
+dds$condition <- droplevels(dds$condition)
+``` 
+
+### Collapsing technical replicates
+
+DESeq2 provides a function *collapseReplicates* which can
+assist in combining the counts from technical replicates into single
+columns of the count matrix. The term *technical replicate* 
+implies multiple sequencing runs of the same library. 
+You should not collapse biological replicates using this function.
+See the manual page for an example of the use of
+*collapseReplicates*.
+
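+A minimal sketch, assuming hypothetical `sample` and `run` columns of
+the column data that identify the biological sample and the
+sequencing run:
+
+```{r collapseSketch, eval=FALSE}
+ddsColl <- collapseReplicates(dds, groupby=dds$sample, run=dds$run)
+```
+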
+### About the pasilla dataset
+
+We continue with the [pasilla](http://bioconductor.org/packages/pasilla) data constructed from the
+count matrix method above. This data set comes from an experiment on
+*Drosophila melanogaster* cell cultures that investigated the
+effect of RNAi knock-down of the splicing factor *pasilla*
+[@Brooks2010].  A detailed transcript of the production of this data
+set is provided in the vignette of the
+data package [pasilla](http://bioconductor.org/packages/pasilla).
+
+<a name="de"/>
+
+## Differential expression analysis 
+
+The standard differential expression analysis steps are wrapped
+into a single function, *DESeq*. The estimation steps performed
+by this function are described [below](#theory), in the manual page for
+`?DESeq` and in the Methods section of the DESeq2 publication [@Love2014]. 
+
+Results tables are generated using the function *results*, which
+extracts a results table with log2 fold changes, *p* values and adjusted
+*p* values. With no additional arguments to *results*, the log2 fold change and
+Wald test *p* value will be for the last variable in the design
+formula, and if this is a factor,
+the comparison will be the last level of this variable over the first
+level. However, the order of the variables of the design does not matter
+so long as the user specifies the comparison using the `name` or
+`contrast` arguments of *results* (described later and in `?results`).
+
+Details about the comparison are printed to the console, above the
+results table. The text, `condition treated vs untreated`, tells you that the
+estimates are of the logarithmic fold change log2(treated/untreated).
+
+```{r deseq}
+dds <- DESeq(dds)
+res <- results(dds)
+res
+``` 
+
+<a name="lfcShrink"/>
+
+In previous versions of DESeq2, the *DESeq* function by default 
+would produce moderated, or shrunken, log2 fold changes through the
+use of the `betaPrior` argument. In version 1.16 and higher, we have
+split the moderation of log2 fold changes into a separate function,
+*lfcShrink*, for reasons described in the [changes section](#changes)
+below. 
+
+Here we provide the `dds` object and the number of the
+coefficient we want to moderate. It is also possible to specify a
+`contrast`, instead of `coef`, which works the same as the `contrast`
+argument of the *results* function.
+If a results object is provided, the `log2FoldChange` column will be 
+swapped out; otherwise *lfcShrink* returns a vector of shrunken log2
+fold changes.
+
+```{r lfcShrink}
+resultsNames(dds)
+resLFC <- lfcShrink(dds, coef=2, res=res)
+resLFC
+```
+
+The above steps should take less than 30 seconds for most analyses. For
+experiments with many samples (e.g. 100 samples), one can take
+advantage of parallelized computation.  Both of the above functions
+have an argument `parallel` which if set to `TRUE` can
+be used to distribute computation across cores specified by the
+*register* function of [BiocParallel](http://bioconductor.org/packages/BiocParallel). For example,
+the following chunk (not evaluated here) would register 4 cores, and
+then the two functions above, with `parallel=TRUE`, would
+split computation over these cores. 
+
+```{r parallel, eval=FALSE}
+library("BiocParallel")
+register(MulticoreParam(4))
+```
+
+We can order our results table by the smallest adjusted *p* value:
+
+```{r resOrder}
+resOrdered <- res[order(res$padj),]
+```
+
+We can summarize some basic tallies using the
+*summary* function.
+
+```{r sumRes}
+summary(res)
+``` 
+
+How many adjusted p-values were less than 0.1?
+
+```{r sumRes01}
+sum(res$padj < 0.1, na.rm=TRUE)
+``` 
+
+The *results* function contains a number of arguments to
+customize the results table which is generated. You can read about
+these arguments by looking up `?results`.
+Note that the *results* function automatically performs independent
+filtering based on the mean of normalized counts for each gene,
+optimizing the number of genes which will have an adjusted *p* value
+below a given FDR cutoff, `alpha`.
+Independent filtering is further discussed [below](#indfilt).
+By default the argument `alpha` is set to $0.1$.  If the adjusted *p*
+value cutoff will be a value other than $0.1$, `alpha` should be set to
+that value:
+
+```{r resAlpha05}
+res05 <- results(dds, alpha=0.05)
+summary(res05)
+sum(res05$padj < 0.05, na.rm=TRUE)
+``` 
+
+<a name="IHW"/>
+
+A generalization of the idea of *p* value filtering is to *weight* hypotheses
+to optimize power. A Bioconductor package, [IHW](http://bioconductor.org/packages/IHW), is available
+that implements the method of *Independent Hypothesis Weighting* [@Ignatiadis2015].
+Here we show the use of *IHW* for *p* value adjustment of DESeq2 results.
+For more details, please see the vignette of the [IHW](http://bioconductor.org/packages/IHW) package.
+Note that the *IHW* result object is stored in the metadata.
+
+```{r IHW}
+library("IHW")
+resIHW <- results(dds, filterFun=ihw)
+summary(resIHW)
+sum(resIHW$padj < 0.1, na.rm=TRUE)
+metadata(resIHW)$ihwResult
+``` 
+
+If a multi-factor design is used, or if the variable in the design
+formula has more than two levels, the `contrast` argument of
+*results* can be used to extract different comparisons from
+the *DESeqDataSet* returned by *DESeq*.
+The use of the `contrast` argument is further discussed [below](#contrasts).
+
+For advanced users, note that all the values calculated by the DESeq2 
+package are stored in the *DESeqDataSet* object, and access 
+to these values is discussed [below](#access).
+
+## Exploring and exporting results
+
+### MA-plot
+
+In DESeq2, the function *plotMA* shows the log2
+fold changes attributable to a given variable over the mean of
+normalized counts for all the samples in the *DESeqDataSet*.
+Points will be colored red if the adjusted *p* value is less than 0.1.
+Points which fall out of the window are plotted as open triangles pointing 
+either up or down.
+
+```{r MA}
+plotMA(res, ylim=c(-2,2))
+```
+
+It is also useful to visualize the MA-plot for the shrunken log2 fold
+changes, which remove the noise associated with log2 fold changes from
+low count genes without requiring arbitrary filtering thresholds.
+
+```{r shrunkMA}
+plotMA(resLFC, ylim=c(-2,2))
+```
+
+After calling *plotMA*, one can use the function
+*identify* to interactively detect the row number of
+individual genes by clicking on the plot. One can then recover
+the gene identifiers by saving the resulting indices:
+
+```{r MAidentify, eval=FALSE}
+idx <- identify(res$baseMean, res$log2FoldChange)
+rownames(res)[idx]
+``` 
+
+### Plot counts 
+
+It can also be useful to examine the counts of reads for a single gene
+across the groups. A simple function for making this
+plot is *plotCounts*, which normalizes counts by sequencing depth
+and adds a pseudocount of 1/2 to allow for log scale plotting.
+The counts are grouped by the variables in `intgroup`, where
+more than one variable can be specified. Here we specify the gene
+which had the smallest *p* value from the results table created
+above. You can select the gene to plot by rowname or by numeric index.
+
+```{r plotCounts}
+plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
+``` 
+
+For customized plotting, an argument `returnData` specifies
+that the function should only return a *data.frame* for
+plotting with *ggplot*.
+
+```{r plotCountsAdv}
+d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition", 
+                returnData=TRUE)
+library("ggplot2")
+ggplot(d, aes(x=condition, y=count)) + 
+  geom_point(position=position_jitter(w=0.1,h=0)) + 
+  scale_y_log10(breaks=c(25,100,400))
+``` 
+
+### More information on results columns 
+
+Information about which variables and tests were used can be found by calling
+the function *mcols* on the results object.
+
+```{r metadata}
+mcols(res)$description
+```
+
+For a particular gene, a log2 fold change of -1 for
+`condition treated vs untreated` means that the treatment
+induces a multiplicative change in observed gene expression level of
+$2^{-1} = 0.5$ compared to the untreated condition. If the variable of
+interest is continuous-valued, then the reported log2 fold change is
+per unit of change of that variable.
+
+<a name="pvaluesNA"/>
+
+**Note on p-values set to NA**: some values in the results table
+can be set to `NA` for one of the following reasons:
+
+* If within a row, all samples have zero counts, 
+  the `baseMean` column will be zero, and the
+  log2 fold change estimates, *p* value and adjusted *p* value
+  will all be set to `NA`.
+* If a row contains a sample with an extreme count outlier
+  then the *p* value and adjusted *p* value will be set to `NA`.
+  These outlier counts are detected by Cook's distance. Customization
+  of this outlier filtering and description of functionality for 
+  replacement of outlier counts and refitting is described 
+  [below](#outlier).
+* If a row is filtered by automatic independent filtering, 
+  for having a low mean normalized count, then only the adjusted *p*
+  value will be set to `NA`. 
+  Description and customization of independent filtering is 
+  described [below](#indfilt).
+
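+For example, a quick tally of how many genes were affected by these
+rules in the results table above (a sketch using the `res` object):
+
+```{r naTally, eval=FALSE}
+table(pvalueNA=is.na(res$pvalue), padjNA=is.na(res$padj))
+```
+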
+### Rich visualization and reporting of results
+
+**ReportingTools.** An HTML report of the results with plots and sortable/filterable columns
+can be generated using the [ReportingTools](http://bioconductor.org/packages/ReportingTools) package
+on a *DESeqDataSet* that has been processed by the *DESeq* function.
+For a code example, see the *RNA-seq differential expression* vignette at
+the [ReportingTools](http://bioconductor.org/packages/ReportingTools) page, or the manual page for the 
+*publish* method for the *DESeqDataSet* class.
+
+**regionReport.** An HTML and PDF summary of the results with plots
+can also be generated using the [regionReport](http://bioconductor.org/packages/regionReport) package.
+The *DESeq2Report* function should be run on a 
+*DESeqDataSet* that has been processed by the *DESeq* function.
+For more details see the manual page for *DESeq2Report* 
+and an example vignette in the [regionReport](http://bioconductor.org/packages/regionReport) package.
+
+**Glimma.** Interactive visualization of DESeq2 output, 
+including MA-plots (also called MD-plot) can be generated using the
+[Glimma](http://bioconductor.org/packages/Glimma) package. See the manual page for *glMDPlot.DESeqResults*.
+
+**pcaExplorer.** Interactive visualization of DESeq2 output,
+including PCA plots, boxplots of counts and other useful summaries can be
+generated using the [pcaExplorer](http://bioconductor.org/packages/pcaExplorer) package.
+See the *Launching the application* section of the package vignette.
+
+### Exporting results to CSV files
+
+A plain-text file of the results can be exported using the 
+base R functions *write.csv* or *write.table*.
+We suggest using a descriptive file name indicating the variable
+and levels which were tested.
+
+```{r export, eval=FALSE}
+write.csv(as.data.frame(resOrdered), 
+          file="condition_treated_results.csv")
+```
+
+Exporting only the results which pass an adjusted *p* value
+threshold can be accomplished with the *subset* function,
+followed by the *write.csv* function.
+
+```{r subset}
+resSig <- subset(resOrdered, padj < 0.1)
+resSig
+``` 
+
+## Multi-factor designs
+
+Experiments with more than one factor influencing the counts can be
+analyzed using design formulas that include the additional variables.
+In fact, DESeq2 can analyze any possible experimental design that can
+be expressed with fixed effects terms (multiple factors, designs with
+interactions, designs with continuous variables, splines, and so on
+are all possible).
+
+By adding variables to the design, one can control for additional variation
+in the counts. For example, if the condition samples are balanced
+across experimental batches, by including the `batch` factor in the
+design, one can increase the sensitivity for finding differences due
+to `condition`. There are multiple ways to analyze experiments when the
+additional variables are of interest and not just controlling factors 
+(see [section on interactions](#interactions)).
+
+The data in the [pasilla](http://bioconductor.org/packages/pasilla) 
+package have a condition of interest 
+(the column `condition`), as well as information on the type of sequencing 
+which was performed (the column `type`), as we can see below:
+
+```{r multifactor}
+colData(dds)
+```
+
+We create a copy of the *DESeqDataSet*, so that we can rerun
+the analysis using a multi-factor design.
+
+```{r copyMultifactor}
+ddsMF <- dds
+```
+
+We can account for the different types of sequencing, and get a clearer picture
+of the differences attributable to the treatment.  As `condition` is the
+variable of interest, we put it at the end of the formula. Thus the *results*
+function will by default pull the `condition` results unless 
+`contrast` or `name` arguments are specified. 
+Then we can re-run *DESeq*:
+
+```{r replaceDesign}
+design(ddsMF) <- formula(~ type + condition)
+ddsMF <- DESeq(ddsMF)
+```
+
+Again, we access the results using the *results* function.
+
+```{r multiResults}
+resMF <- results(ddsMF)
+head(resMF)
+```
+
+It is also possible to retrieve the log2 fold changes, *p* values and adjusted
+*p* values of the `type` variable. The `contrast` argument of 
+the function *results* takes a character vector of length three:
+the name of the variable, the name of the factor level for the numerator
+of the log2 ratio, and the name of the factor level for the denominator.
+The `contrast` argument can also take other forms, as
+described in the help page for *results* and [below](#contrasts).
+
+```{r multiTypeResults}
+resMFType <- results(ddsMF,
+                     contrast=c("type", "single-read", "paired-end"))
+head(resMFType)
+```
+
+If the variable is continuous or an interaction term
+(see [section on interactions](#interactions))
+then the results can be extracted using the `name` argument to *results*,
+where the name is one of the elements returned by `resultsNames(dds)`.
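+
+For example, with a hypothetical continuous covariate `age` in the
+design, the corresponding results could be extracted as:
+
+```{r nameArgument, eval=FALSE}
+resultsNames(dds)
+results(dds, name="age")
+```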
+
+<a name="transform"/>
+
+# Data transformations and visualization 
+
+## Count data transformations
+
+In order to test for differential expression, we operate on raw counts
+and use discrete distributions as described in the previous section on
+differential expression.
+However for other downstream analyses --
+e.g. for visualization or clustering -- it might be useful 
+to work with transformed versions of the count data. 
+
+Perhaps the most obvious choice of transformation is the logarithm.
+Since count values for a gene can be zero in some
+conditions (and non-zero in others), some advocate the use of
+*pseudocounts*, i.e. transformations of the form:
+
+$$ y = \log_2(n + n_0) $$
+
+where *n* represents the count values and $n_0$ is a positive constant.
+
+In this section, we discuss two alternative
+approaches that offer more theoretical justification and a rational way
+of choosing the parameter equivalent to $n_0$ above.
+The *regularized logarithm* or *rlog* incorporates a prior on
+the sample differences [@Love2014], 
+and the other uses the concept of variance stabilizing
+transformations (VST) [@Tibshirani1988; @sagmb2003; @Anders:2010:GB].
+Both transformations produce transformed data on the log2 scale
+which has been normalized with respect to library size.
+
+The point of these two transformations, the *rlog* and the VST,
+is to remove the dependence of the variance on the mean,
+particularly the high variance of the logarithm of count data when the
+mean is low. Both *rlog* and VST use the experiment-wide trend
+of variance over mean, in order to transform the data to remove the
+experiment-wide trend. Note that we do not require or
+desire that all the genes have *exactly* the same variance after
+transformation. Indeed, in a figure below, you will see
+that after the transformations the genes with the same mean do not
+have exactly the same standard deviations, but that the
+experiment-wide trend has flattened. It is those genes with row
+variance above the trend which will allow us to cluster samples into
+interesting groups.
+
+**Note on running time:** if you have many samples (e.g. 100s),
+the *rlog* function might take too long, and so the *vst* function
+will be a faster choice. 
+The rlog and VST have similar properties, but the rlog requires
+fitting a shrinkage term for each sample and each gene, which takes
+time.  See the DESeq2 paper for more discussion on the differences
+[@Love2014].
+
+### Blind dispersion estimation
+
+The two functions, *rlog* and *vst*, have an argument
+`blind`, for whether the transformation should be blind to the
+sample information specified by the design formula. When
+`blind` equals `TRUE` (the default), the functions
+will re-estimate the dispersions using only an intercept.
+This setting should be used in order to compare
+samples in a manner wholly unbiased by the information about
+experimental groups, for example to perform sample QA (quality
+assurance) as demonstrated below.
+
+However, blind dispersion estimation is not the appropriate choice if
+one expects that many or the majority of genes (rows) will have large
+differences in counts which are explainable by the experimental design,
+and one wishes to transform the data for downstream analysis. In this
+case, using blind dispersion estimation will lead to large estimates
+of dispersion, as it attributes differences due to experimental design
+as unwanted *noise*, and will result in overly shrinking the transformed
+values towards each other. 
+By setting `blind` to `FALSE`, the dispersions
+already estimated will be used to perform transformations, or if not
+present, they will be estimated using the current design formula. Note
+that only the fitted dispersion estimates from the mean-dispersion trend
+line are used in the transformation (the global dependence of
+dispersion on mean for the entire experiment).
+So setting `blind` to `FALSE` is still for the most
+part not using the information about which samples were in which
+experimental group in applying the transformation.
+
+### Extracting transformed values
+
+These transformation functions return an object of class *DESeqTransform*
+which is a subclass of *RangedSummarizedExperiment*. 
+For ~20 samples, running on a newly created `DESeqDataSet`,
+*rlog* may take 30 seconds, 
+*varianceStabilizingTransformation* may take 5 seconds, and
+*vst* less than 1 second (by subsetting to 1000 genes for
+calculating the global dispersion trend).
+However, the running times are shorter and more similar with `blind=FALSE` and
+if the function *DESeq* has already been run, because then
+it is not necessary to re-estimate the dispersion values.
+The *assay* function is used to extract the matrix of normalized values.
+
+```{r rlogAndVST}
+rld <- rlog(dds, blind=FALSE)
+vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
+vsd.fast <- vst(dds, blind=FALSE)
+head(assay(rld), 3)
+```
+
+### Regularized log transformation
+
+The function *rlog* stands for *regularized log*; it transforms
+the original count data to the log2 scale by fitting a
+model with a term for each sample and a prior distribution on the
+coefficients which is estimated from the data. This is the same kind
+of shrinkage (sometimes referred to as regularization, or moderation)
+of log fold changes used by the *DESeq* and
+*nbinomWaldTest* functions. The resulting data contains elements defined as:
+
+$$ \log_2(q_{ij}) = \beta_{i0} + \beta_{ij} $$
+
+where $q_{ij}$ is a parameter proportional to the expected true
+concentration of fragments for gene *i* and sample *j* (see
+formula [below](#theory)), $\beta_{i0}$ is an intercept which does not
+undergo shrinkage, and $\beta_{ij}$ is the sample-specific effect
+which is shrunk toward zero based on the dispersion-mean trend over
+the entire dataset. The trend typically captures high dispersions for
+low counts, and therefore these genes exhibit higher shrinkage from
+the *rlog*.
+
+Note that, as $q_{ij}$ represents the part of the mean value
+$\mu_{ij}$ after the size factor $s_j$ has been divided out, it is
+clear that the rlog transformation inherently accounts for differences
+in sequencing depth. Without priors, this design matrix would lead to
+a non-unique solution, however the addition of a prior on
+non-intercept betas allows for a unique solution to be found. 
+
+### Variance stabilizing transformation
+
+Above, we used a parametric fit for the dispersion. In this case, the
+closed-form expression for the variance stabilizing transformation is
+used by *varianceStabilizingTransformation*, which is
+derived in the file `vst.pdf`, which is distributed in the
+package alongside this vignette. If a local fit is used (option
+`fitType="locfit"` to *estimateDispersions*) a numerical integration
+is used instead. 
+
+### Effects of transformations on the variance
+
+The figure below plots the standard deviation of the transformed data,
+across samples, against the mean, using the shifted logarithm
+transformation, the regularized log transformation and the variance
+stabilizing transformation.  The shifted logarithm has elevated
+standard deviation in the lower count range, and the regularized log
+to a lesser extent, while for the variance stabilized data the
+standard deviation is roughly constant along the whole dynamic range.
+
+Note that the vertical axis in such plots is the square root of the
+variance over all samples, and thus includes the variance due to the
+experimental conditions.  While a flat curve of the square root of
+variance over the mean may seem like the goal of such transformations,
+this may be unreasonable in the case of datasets with many true
+differences due to the experimental conditions.
+
+```{r meansd}
+# this gives log2(n + 1)
+ntd <- normTransform(dds)
+library("vsn")
+notAllZero <- (rowSums(counts(dds))>0)
+meanSdPlot(assay(ntd)[notAllZero,])
+meanSdPlot(assay(rld)[notAllZero,])
+meanSdPlot(assay(vsd)[notAllZero,])
+```
+
+## Data quality assessment by sample clustering and visualization
+
+Data quality assessment and quality control (i.e. the removal of
+insufficiently good data) are essential steps of any data
+analysis. These steps should typically be performed 
+very early in the analysis of a new data set,
+preceding or in parallel to the differential expression testing.
+
+We define the term *quality* as *fitness for purpose*.
+Our purpose is the detection of differentially expressed genes, and we
+are looking in particular for samples whose experimental treatment
+suffered from an abnormality that renders the data points obtained from
+these particular samples detrimental to our purpose.
+
+### Heatmap of the count matrix
+
+To explore a count matrix, it is often instructive to look at it as a
+heatmap. Below we show how to produce such a heatmap for various
+transformations of the data. 
+
+```{r heatmap}
+library("pheatmap")
+select <- order(rowMeans(counts(dds,normalized=TRUE)),
+                decreasing=TRUE)[1:20]
+df <- as.data.frame(colData(dds)[,c("condition","type")])
+pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
+         cluster_cols=FALSE, annotation_col=df)
+```
+
+### Heatmap of the sample-to-sample distances
+
+Another use of the transformed data is sample clustering. Here, we apply the
+*dist* function to the transpose of the transformed count matrix to get
+sample-to-sample distances. We could alternatively use the variance stabilized
+transformation here.
+
+```{r sampleClust}
+sampleDists <- dist(t(assay(rld)))
+```
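+
+For example, to use the variance stabilized values instead:
+
+```{r sampleDistVST, eval=FALSE}
+sampleDistsVSD <- dist(t(assay(vsd)))
+```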
+
+A heatmap of this distance matrix gives us an overview over similarities
+and dissimilarities between samples.
+We have to provide the sample distances to the
+`clustering_distance` arguments of the *pheatmap* function.
+Otherwise, the heatmap function would calculate a clustering based on
+the distances between the rows/columns of the distance matrix.
+
+```{r figHeatmapSamples}
+library("RColorBrewer")
+sampleDistMatrix <- as.matrix(sampleDists)
+rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
+colnames(sampleDistMatrix) <- NULL
+colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
+pheatmap(sampleDistMatrix,
+         clustering_distance_rows=sampleDists,
+         clustering_distance_cols=sampleDists,
+         col=colors)
+```
+
+### Principal component plot of the samples
+
+Related to the distance matrix is the PCA plot, which shows 
+the samples in the 2D plane spanned by their first two principal
+components. This type of plot is useful for visualizing the overall
+effect of experimental covariates and batch effects.
+
+```{r figPCA}
+plotPCA(rld, intgroup=c("condition", "type"))
+```
+
+It is also possible to customize the PCA plot using the
+*ggplot* function.
+
+```{r figPCA2}
+library("ggplot2")
+pcaData <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
+percentVar <- round(100 * attr(pcaData, "percentVar"))
+ggplot(pcaData, aes(PC1, PC2, color=condition, shape=type)) +
+  geom_point(size=3) +
+  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
+  ylab(paste0("PC2: ",percentVar[2],"% variance")) + 
+  coord_fixed()
+```
+
+# Variations to the standard workflow
+
+## Wald test individual steps 
+
+The function *DESeq* runs the following functions in order:
+
+```{r WaldTest, eval=FALSE}
+dds <- estimateSizeFactors(dds)
+dds <- estimateDispersions(dds)
+dds <- nbinomWaldTest(dds)
+```
+
+<a name="contrasts"/>
+
+## Contrasts 
+
+A contrast is a linear combination of estimated log2 fold changes,
+which can be used to test if differences between groups are equal to
+zero.  The simplest use case for contrasts is an experimental design
+containing a factor with three levels, say A, B and C.  Contrasts
+enable the user to generate results for all 3 possible differences:
+log2 fold change of B vs A, of C vs A, and of C vs B.
+The `contrast` argument of *results* function is
+used to extract test results of log2 fold changes of interest, for example:
+
+```{r simpleContrast, eval=FALSE}
+results(dds, contrast=c("condition","C","B"))
+``` 
+
+Log2 fold changes can also be added and subtracted by providing a
+`list` to the `contrast` argument which has two elements:
+the names of the log2 fold changes to add, and the names of the log2
+fold changes to subtract. The names used in the list should come from
+`resultsNames(dds)`.
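+
+For example, assuming `resultsNames(dds)` contains the elements
+`condition_B_vs_A` and `condition_C_vs_A` (the exact names depend on
+the design), subtracting the second from the first yields the C vs B
+comparison:
+
+```{r listContrast, eval=FALSE}
+# (C vs A) - (B vs A) = C vs B
+results(dds, contrast=list("condition_C_vs_A", "condition_B_vs_A"))
+```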
+
+Alternatively, a numeric vector of the
+length of `resultsNames(dds)` can be provided, for manually
+specifying the linear combination of terms.  Demonstrations of the use
+of contrasts for various designs can be found in the examples section
+of the help page for the *results* function. The
+mathematical formula that is used to generate the contrasts can be
+found [below](#theory).
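+
+Continuing the example above, if `resultsNames(dds)` returned
+`"Intercept"`, `"condition_B_vs_A"` and `"condition_C_vs_A"`, the same
+C vs B comparison could be specified numerically:
+
+```{r numericContrast, eval=FALSE}
+# coefficients weighted 0, -1, +1 give (C vs A) - (B vs A)
+results(dds, contrast=c(0, -1, 1))
+```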
+
+<a name="interactions"/>
+
+## Interactions 
+
+Interaction terms can be added to the design formula, in order to
+test, for example, if the log2 fold change attributable to a given
+condition is *different* based on another factor, for example if the
+condition effect differs across genotype.
+
+Many users begin to add interaction terms to the design formula, when
+in fact a much simpler approach would give all the results tables that
+are desired. We will explain this approach first, because it is much
+simpler to perform.
+If the comparisons of interest are, for example, the effect
+of a condition for different sets of samples, a simpler approach than
+adding interaction terms explicitly to the design formula is to
+perform the following steps:
+
+* combine the factors of interest into a single factor with all
+  combinations of the original factors 
+* change the design to include just this factor, e.g. ~ group
+
+Using this design is similar to adding an interaction term, 
+in that it models multiple condition effects which
+can be easily extracted with *results*.
+Suppose we have two factors `genotype` (with values I, II, and III) 
+and `condition` (with values A and B), and we want to extract 
+the condition effect specifically for each genotype. We could use the
+following approach to obtain, e.g. the condition effect for genotype I: 
+
+```{r combineFactors, eval=FALSE}
+dds$group <- factor(paste0(dds$genotype, dds$condition))
+design(dds) <- ~ group
+dds <- DESeq(dds)
+resultsNames(dds)
+results(dds, contrast=c("group", "IB", "IA"))
+```
+
+The following two plots diagram hypothetical genotype-specific
+condition effects, which could be modeled with interaction terms by
+using a design of `~genotype + condition + genotype:condition`.
+
+In the first plot (Gene 1), note that the condition effect
+is consistent across genotypes. Although condition A has a different
+baseline for I,II, and III, the condition effect is a log2 fold
+change of about 2 for each genotype.  Using a model with an
+interaction term `genotype:condition`, the interaction terms for
+genotype II and genotype III will be nearly 0.
+
+Here, the y-axis represents log2(n+1), and each
+group has 20 samples (black dots). A red line connects the mean of the
+groups within each genotype. 
+
+```{r interFig, echo=FALSE, results="hide"}
+npg <- 20
+mu <- 2^c(8,10,9,11,10,12)
+cond <- rep(rep(c("A","B"),each=npg),3)
+geno <- rep(c("I","II","III"),each=2*npg)
+table(cond, geno)
+counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
+d <- data.frame(log2c=log2(counts+1), cond, geno)
+library(ggplot2)
+plotit <- function(d, title) {
+  ggplot(d, aes(x=cond, y=log2c, group=geno)) + 
+    geom_jitter(size=1.5, position = position_jitter(width=.15)) +
+    facet_wrap(~ geno) + 
+    stat_summary(fun.y=mean, geom="line", colour="red", size=0.8) + 
+    xlab("condition") + ylab("log2(counts+1)") + ggtitle(title)
+}
+plotit(d, "Gene 1") + ylim(7,13)
+lm(log2c ~ cond + geno + geno:cond, data=d)
+``` 
+
+In the second plot
+(Gene 2), we can see that the condition effect is not consistent
+across genotype. Here the main condition effect (the effect for the
+reference genotype I) is again 2. However, this time the interaction
+terms will be around 1 for genotype II and -4 for genotype III. This
+is because the condition effect is higher by 1 for genotype II
+compared to genotype I, and lower by 4 for genotype III compared to
+genotype I.  The condition effect for genotype II (or III) is
+obtained by adding the main condition effect and the interaction
+term for that genotype.  Such a plot can be made using the
+*plotCounts* function as shown above.
+
+```{r interFig2, echo=FALSE, results="hide"}
+mu[4] <- 2^12
+mu[6] <- 2^8
+counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
+d2 <- data.frame(log2c=log2(counts + 1), cond, geno)
+plotit(d2, "Gene 2") + ylim(7,13)
+lm(log2c ~ cond + geno + geno:cond, data=d2)
+``` 
+
+Now we will continue to explain the use of interactions in order to
+test for *differences* in condition effects. We continue with
+the example of condition effects across three genotypes (I, II, and III).
+
+The key point to remember about designs with interaction terms is
+that, unlike for a design `~genotype + condition`, where the condition
+effect represents the 
+*overall* effect controlling for differences due to genotype, by adding
+`genotype:condition`, the main condition effect only
+represents the effect of condition for the *reference level* of
+genotype (I, or whichever level was defined by the user as the
+reference level). The interaction terms `genotypeII.conditionB`
+and `genotypeIII.conditionB` give the *difference*
+between the condition effect for a given genotype and the condition
+effect for the reference genotype. 
+
+This genotype-condition interaction example is examined in further
+detail in Example 3 in the help page for *results*, which
+can be found by typing `?results`. In particular, we show how to
+test for differences in the condition effect across genotype, and we
+show how to obtain the condition effect for non-reference genotypes.
+
+Note that for DESeq2 versions higher than 1.10, the *DESeq* function
+will turn off log fold change shrinkage (setting `betaPrior=FALSE`),
+for designs which contain an interaction term. Turning off the log
+fold change shrinkage allows the software to use standard model
+matrices (as would be produced by *model.matrix*), where the
+interaction coefficients are easier to interpret.
+
+## Time-series experiments
+
+There are a number of ways to analyze time-series experiments,
+depending on the biological question of interest. In order to test for
+any differences over multiple time points, one can use a design
+including the time factor, and then test using the likelihood ratio
+test as described in the following section, where the time factor is
+removed in the reduced formula. For a control and treatment time
+series, one can use a design formula containing the condition factor,
+the time factor, and the interaction of the two. In this case, using
+the likelihood ratio test with a reduced model which does not contain
+the interaction terms will test whether the condition induces a change
+in gene expression at any time point after the reference level time point
+(time 0). An example of the latter analysis is provided in our
+[RNA-seq workflow](http://www.bioconductor.org/help/workflows/rnaseqGene).
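+
+As a sketch, assuming factors `condition` and `time` in
+`colData(dds)`, the control and treatment time series analysis
+described above would look like:
+
+```{r timeSeriesLRT, eval=FALSE}
+design(dds) <- ~ condition + time + condition:time
+dds <- DESeq(dds, test="LRT", reduced = ~ condition + time)
+res <- results(dds)
+```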
+
+## Likelihood ratio test 
+
+DESeq2 offers two kinds of hypothesis tests: the Wald test, where
+we use the estimated standard error of a log2 fold change to test if it is
+equal to zero, and the likelihood ratio test (LRT). The LRT examines
+two models for the counts, a *full* model with a certain number
+of terms and a *reduced* model, in which some of the terms of the
+*full* model are removed. The test determines if the increased
+likelihood of the data using the extra terms in the *full* model
+is more than expected if those extra terms are truly zero.
+
+The LRT is therefore useful for testing multiple
+terms at once, for example testing 3 or more levels of a factor at once,
+or all interactions between two variables. 
+The LRT for count data is conceptually similar to an analysis of variance (ANOVA)
+calculation in linear regression, except that in the case of the Negative
+Binomial GLM, we use an analysis of deviance (ANODEV), where the
+*deviance* captures the difference in likelihood between a full
+and a reduced model.
+
+The likelihood ratio test can be performed by specifying `test="LRT"`
+when using the *DESeq* function, and
+providing a reduced design formula, e.g. one in which a
+number of terms from `design(dds)` are removed.
+The degrees of freedom for the test are obtained from the difference
+between the number of parameters in the two models. 
+A simple likelihood ratio test, if the full design was
+`~condition`, would look like:
+
+```{r simpleLRT, eval=FALSE}
+dds <- DESeq(dds, test="LRT", reduced=~1)
+res <- results(dds)
+``` 
+
+If the full design contained other variables, 
+such as a batch variable, e.g. `~batch + condition`,
+then the likelihood ratio test would look like:
+
+```{r simpleLRT2, eval=FALSE}
+dds <- DESeq(dds, test="LRT", reduced=~batch)
+res <- results(dds)
+``` 
+
+<a name="outlier"/>
+
+## Approach to count outliers 
+
+RNA-seq data sometimes contain isolated instances of very large counts
+that are apparently unrelated to the experimental or study design, and
+which may be considered outliers. There are many reasons why outliers
+can arise, including rare technical or experimental artifacts, read
+mapping problems in the case of genetically differing samples, and
+genuine, but rare biological events. In many cases, users appear
+primarily interested in genes that show a consistent behavior, and
+this is the reason why by default, genes that are affected by such
+outliers are set aside by DESeq2, or if there are sufficient samples,
+outlier counts are replaced for model fitting.  These two behaviors
+are described below.
+
+The *DESeq* function calculates, for every gene and for every sample,
+a diagnostic test for outliers called *Cook's distance*. Cook's distance 
+is a measure of how much a single sample is influencing the fitted 
+coefficients for a gene, and a large value of Cook's distance is 
+intended to indicate an outlier count. 
+The Cook's distances are stored as a matrix available in 
+`assays(dds)[["cooks"]]`.
+
+The *results* function automatically flags genes which contain a 
+Cook's distance above a cutoff for samples which have 3 or more replicates. 
+The *p* values and adjusted *p* values for these genes are set to `NA`. 
+At least 3 replicates are required for flagging, as it is difficult to judge
+which sample might be an outlier with only 2 replicates.
+This filtering can be turned off with `results(dds, cooksCutoff=FALSE)`.
+
+With many degrees of freedom -- i.e., many more samples than the number of parameters to 
+be estimated -- it is undesirable to remove entire genes from the analysis
+just because their data include a single count outlier. When there
+are 7 or more replicates for a given sample, the *DESeq*
+function will automatically replace counts with large Cook's distance 
+with the trimmed mean over all samples, scaled up by the size factor or 
+normalization factor for that sample. This approach is conservative:
+it will not lead to false positives, as it replaces
+the outlier value with the value predicted by the null hypothesis.
+This outlier replacement only occurs when there are 7 or more
+replicates, and can be turned off with 
+`DESeq(dds, minReplicatesForReplace=Inf)`.
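+
+Combining the two switches described above, both outlier replacement
+and outlier flagging can be disabled:
+
+```{r noOutlierHandling, eval=FALSE}
+dds <- DESeq(dds, minReplicatesForReplace=Inf)
+res <- results(dds, cooksCutoff=FALSE)
+```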
+
+The default Cook's distance cutoff for the two behaviors described above
+depends on the sample size and number of parameters
+to be estimated. The default is to use the 99% quantile of the 
+F(p,m-p) distribution (with *p* the number of parameters including the 
+intercept and *m* the number of samples).
+The default for gene flagging can be modified using the `cooksCutoff` 
+argument to the *results* function. 
+For outlier replacement, *DESeq* preserves the original counts in
+`counts(dds)`, saving the replacement counts as a matrix named
+`replaceCounts` in `assays(dds)`.
+Note that with continuous variables in the design, outlier detection
+and replacement are not automatically performed, as our 
+current methods involve a robust estimation of within-group variance
+which does not extend easily to continuous covariates. However, users
+can examine the Cook's distances in `assays(dds)[["cooks"]]`, in
+order to perform manual visualization and filtering if necessary.
+
+**Note on many outliers:** if there are very many outliers (e.g. many
+hundreds or thousands) reported by `summary(res)`, one might consider
+further exploration to see if a single sample or a few samples should
+be removed due to low quality.  The automatic outlier
+filtering/replacement is most useful in situations in which the number of
+outliers is limited. When there are thousands of reported outliers, it
+might make more sense to turn off the outlier filtering/replacement
+(*DESeq* with `minReplicatesForReplace=Inf` and *results* with
+`cooksCutoff=FALSE`) and perform manual inspection: first, it would be
+advantageous to make a PCA plot as described above to spot individual
+sample outliers; second, one can make a boxplot of the Cook's
+distances to see if one sample is consistently higher than others
+(here this is not the case):
+
+```{r boxplotCooks}
+par(mar=c(8,5,2,2))
+boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
+```
+
+## Dispersion plot and fitting alternatives
+
+Plotting the dispersion estimates is a useful diagnostic. The dispersion
+plot below is typical, with the final estimates shrunk
+from the gene-wise estimates towards the fitted estimates. Some gene-wise
+estimates are flagged as outliers and not shrunk towards the fitted value
+(this outlier detection is described in the manual page for *estimateDispersionsMAP*).
+The amount of shrinkage can be more or less than seen here, depending 
+on the sample size, the number of coefficients, the row mean
+and the variability of the gene-wise estimates.
+
+```{r dispFit}
+plotDispEsts(dds)
+```
+
+### Local or mean dispersion fit
+
+A local smoothed dispersion fit is automatically substituted in the case that
+the parametric curve doesn't fit the observed dispersion-mean relationship.
+This can be prespecified by providing the argument
+`fitType="local"` to either *DESeq* or *estimateDispersions*.
+Additionally, using the mean of gene-wise dispersion estimates as the
+fitted value can be specified by providing the argument `fitType="mean"`. 
+
+### Supply a custom dispersion fit
+
+Any fitted values can be provided during dispersion estimation, using
+the lower-level functions described in the manual page for
+*estimateDispersionsGeneEst*. In the code chunk below, we
+use the gene-wise estimates which were already calculated and saved 
+in the metadata column `dispGeneEst`. Then we calculate the
+median value of the dispersion estimates above a threshold, and save
+these values as the fitted dispersions, using the replacement function
+for *dispersionFunction*. In the last line, the function
+*estimateDispersionsMAP* uses the 
+fitted dispersions to generate maximum *a posteriori* (MAP)
+estimates of dispersion. 
+
+```{r dispFitCustom}
+ddsCustom <- dds
+useForMedian <- mcols(ddsCustom)$dispGeneEst > 1e-7
+medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
+                     na.rm=TRUE)
+dispersionFunction(ddsCustom) <- function(mu) medianDisp
+ddsCustom <- estimateDispersionsMAP(ddsCustom)
+```
+
+<a name="indfilt"/>
+
+## Independent filtering of results
+
+The *results* function of the DESeq2 package performs independent
+filtering by default using the mean of normalized counts as a filter
+statistic.  A threshold on the filter statistic is found which
+optimizes the number of adjusted *p* values lower than a significance
+level `alpha` (we use the standard variable name for significance
+level, though it is unrelated to the dispersion parameter $\alpha$).
+The theory behind independent filtering is discussed in greater detail
+[below](#indfilttheory). The adjusted *p* values for the genes
+which do not pass the filter threshold are set to `NA`.
+
+The default independent filtering is performed using the *filtered_p*
+function of the [genefilter](http://bioconductor.org/packages/genefilter) package, and all of the
+arguments of *filtered_p* can be passed to the *results* function.
+The filter threshold value and the number of rejections at each
+quantile of the filter statistic are available as metadata of the
+object returned by *results*.
+
+For example, we can visualize the optimization by plotting the
+`filterNumRej` attribute of the results object. The *results* function
+maximizes the number of rejections (adjusted *p* value less than a
+significance level), over the quantiles of a filter statistic (the
+mean of normalized counts). The threshold chosen (vertical line) is
+the lowest quantile of the filter for which the number of rejections
+is within 1 residual standard deviation of the peak of a curve fit to
+the number of rejections over the filter quantiles:
+
+```{r filtByMean}
+metadata(res)$alpha
+metadata(res)$filterThreshold
+plot(metadata(res)$filterNumRej, 
+     type="b", ylab="number of rejections",
+     xlab="quantiles of filter")
+lines(metadata(res)$lo.fit, col="red")
+abline(v=metadata(res)$filterTheta)
+```
+
+Independent filtering can be turned off by setting 
+`independentFiltering` to `FALSE`.
+
+```{r noFilt}
+resNoFilt <- results(dds, independentFiltering=FALSE)
+addmargins(table(filtering=(res$padj < .1),
+                 noFiltering=(resNoFilt$padj < .1)))
+``` 
+
+## Tests of log2 fold change above or below a threshold
+
+It is also possible to provide thresholds for constructing
+Wald tests of significance. Two arguments to the *results*
+function allow for threshold-based Wald tests: `lfcThreshold`,
+which takes a non-negative numeric threshold value, 
+and `altHypothesis`, which specifies the kind of test.
+Note that the *alternative hypothesis* is specified by the user, 
+i.e. those genes which the user is interested in finding, and the test 
+provides *p* values for the null hypothesis, the complement of the set 
+defined by the alternative. The `altHypothesis` argument can take one 
+of the following four values, where $\beta$ is the log2 fold change
+specified by the `name` argument, and $x$ is the `lfcThreshold`.
+
+* `greaterAbs` - $|\beta| > x$ - tests are two-tailed
+* `lessAbs` - $|\beta| < x$ - *p* values are the maximum of the upper and lower tests
+* `greater` - $\beta > x$
+* `less` - $\beta < -x$
+
+The test `altHypothesis="lessAbs"` requires that the user have
+run *DESeq* with the argument `betaPrior=FALSE`.  To
+understand the reason for this requirement, consider that during
+hypothesis testing, the null hypothesis is favored unless the data
+provide strong evidence to reject the null.  For this test, including
+a zero-centered prior on log fold change would favor the alternative
+hypothesis, shrinking log fold changes toward zero.  Removing the
+prior on log fold changes for tests of small log fold change allows
+for detection of only those genes where the data alone provides
+evidence against the null.
+
+The four possible values of `altHypothesis` are demonstrated
+in the following code and visually by MA-plots in the following figures.
+First we run *DESeq* and specify `betaPrior=FALSE` in order 
+to demonstrate `altHypothesis="lessAbs"`.
+
+```{r ddsNoPrior}
+ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
+```
+
+In order to produce results tables for the following tests, the same arguments
+(except `ylim`) would be provided to the *results* function. 
+
+```{r lfcThresh}
+par(mfrow=c(2,2),mar=c(2,2,1,1))
+yl <- c(-2.5,2.5)
+resGA <- results(dds, lfcThreshold=.5, altHypothesis="greaterAbs")
+resLA <- results(ddsNoPrior, lfcThreshold=.5, altHypothesis="lessAbs")
+resG <- results(dds, lfcThreshold=.5, altHypothesis="greater")
+resL <- results(dds, lfcThreshold=.5, altHypothesis="less")
+plotMA(resGA, ylim=yl)
+abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
+plotMA(resLA, ylim=yl)
+abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
+plotMA(resG, ylim=yl)
+abline(h=.5,col="dodgerblue",lwd=2)
+plotMA(resL, ylim=yl)
+abline(h=-.5,col="dodgerblue",lwd=2)
+``` 
+
+<a name="access"/>
+
+## Access to all calculated values
+
+All row-wise calculated values (intermediate dispersion calculations,
+coefficients, standard errors, etc.) are stored in the *DESeqDataSet* 
+object, e.g. `dds` in this vignette. These values are accessible 
+by calling *mcols* on `dds`. 
+Descriptions of the columns are accessible by two calls to 
+*mcols*. Note that the call to `substr` below is only for display
+purposes.
+
+```{r mcols}
+mcols(dds,use.names=TRUE)[1:4,1:4]
+substr(names(mcols(dds)),1,10) 
+mcols(mcols(dds), use.names=TRUE)[1:4,]
+```
+
+The mean values $\mu_{ij} = s_j q_{ij}$ and the Cook's distances for each gene and
+sample are stored as matrices in the assays slot:
+
+```{r muAndCooks}
+head(assays(dds)[["mu"]])
+head(assays(dds)[["cooks"]])
+``` 
+
+The dispersions $\alpha_i$ can be accessed with the
+*dispersions* function.
+
+```{r dispersions}
+head(dispersions(dds))
+head(mcols(dds)$dispersion)
+``` 
+
+The size factors $s_j$ are accessible via *sizeFactors*:
+
+```{r sizefactors}
+sizeFactors(dds)
+``` 
+
+For advanced users, we also include a convenience function *coef* for 
+extracting the matrix $[\beta_{ir}]$ for all genes *i* and
+model coefficients $r$.
+This function can also return a matrix of standard errors, see `?coef`.
+The columns of this matrix correspond to the effects returned by *resultsNames*.
+Note that the *results* function is best for building 
+results tables with *p* values and adjusted *p* values.
+
+```{r coef}
+head(coef(dds))
+``` 
+
+The beta prior variance $\sigma_r^2$ is stored as an attribute of the
+*DESeqDataSet*: 
+
+```{r betaPriorVar}
+attr(dds, "betaPriorVar")
+``` 
+
+The dispersion prior variance $\sigma_d^2$ is stored as an
+attribute of the dispersion function:
+
+```{r dispPriorVar}
+dispersionFunction(dds)
+attr(dispersionFunction(dds), "dispPriorVar")
+``` 
+
+The version of DESeq2 which was used to construct the
+*DESeqDataSet* object, or the version used when
+*DESeq* was run, is stored here:
+
+```{r versionNum}
+metadata(dds)[["version"]]
+``` 
+
+## Sample-/gene-dependent normalization factors 
+
+In some experiments, there might be gene-dependent biases
+which vary across samples. For instance, GC-content bias or length
+bias might vary across samples coming from different labs or
+processed at different times. We use the terms *normalization factors*
+for a gene x sample matrix, and *size factors* for a
+single number per sample.  Incorporating normalization factors,
+the mean parameter $\mu_{ij}$ becomes:
+
+$$ \mu_{ij} = NF_{ij} q_{ij} $$
+
+with normalization factor matrix *NF* having the same dimensions
+as the counts matrix *K*. This matrix can be incorporated as shown
+below. We recommend providing a matrix with row-wise geometric means of 1, 
+so that the mean of normalized counts for a gene is close to the mean
+of the unnormalized counts.
+This can be accomplished by dividing out the current row geometric means.
+
+```{r normFactors, eval=FALSE}
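+# normFactors is assumed here to be a user-created gene-by-sample
+# matrix of normalization factors (e.g. from cqn or EDASeq, see below)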
+normFactors <- normFactors / exp(rowMeans(log(normFactors)))
+normalizationFactors(dds) <- normFactors
+```
+
+These steps then replace *estimateSizeFactors* which occurs within the
+*DESeq* function. The *DESeq* function will look for pre-existing
+normalization factors and use these in place of size factors
+(and a message will be printed confirming this).
+
+The methods provided by the
+[cqn](http://bioconductor.org/packages/cqn) or 
+[EDASeq](http://bioconductor.org/packages/EDASeq) packages
+can help correct for GC or length biases. They both describe in their
+vignettes how to create matrices which can be used by DESeq2.
+From the formula above, we see that normalization factors should be on
+the scale of the counts, like size factors, and unlike offsets which
+are typically on the scale of the predictors (i.e. the logarithmic scale for
+the negative binomial GLM). At the time of writing, the transformation
+from the matrices provided by these packages should be:
+
+```{r offsetTransform, eval=FALSE}
+cqnOffset <- cqnObject$glm.offset
+cqnNormFactors <- exp(cqnOffset)
+EDASeqNormFactors <- exp(-1 * EDASeqOffset)
+```
+
+## "Model matrix not full rank"
+
+While most experimental designs run easily using design formulas, some
+design formulas can cause problems and result in the *DESeq*
+function returning an error with the text: "the model matrix is not
+full rank, so the model cannot be fit as specified."  There are two
+main reasons for this problem: either one or more columns in the model
+matrix are linear combinations of other columns, or there are levels
+of factors or combinations of levels of multiple factors which are
+missing samples. We address these two problems below and discuss
+possible solutions:
+
+### Linear combinations
+
+The simplest case is the linear combination, or linear dependency
+problem, when two variables contain exactly the same information, such
+as in the following sample table. The software cannot fit an effect
+for `batch` and `condition`, because they produce
+identical columns in the model matrix. This is also referred to as
+*perfect confounding*. A unique solution of coefficients (the $\beta_i$ in
+the formula [below](#theory)) is not possible.
+
+```{r lineardep, echo=FALSE}
+DataFrame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
+``` 
+
+Another situation which will cause problems is when the variables are
+not identical, but one variable can be formed by the combination of
+other factor levels. In the following example, the effect of batch 2
+vs 1 cannot be fit because it is identical to a column in the model
+matrix which represents the condition C vs A effect.
+
+```{r lineardep2, echo=FALSE}
+DataFrame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
+``` 
+
+In both of these cases above, the batch effect cannot be fit and must
+be removed from the model formula. There is just no way to tell apart
+the condition effects and the batch effects. The options are either to assume
+there is no batch effect (which we know is highly unlikely given the
+literature on batch effects in sequencing datasets) or to repeat the
+experiment and properly balance the conditions across batches.
+A balanced design would look like:
+
+```{r lineardep3, echo=FALSE}
+DataFrame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
+``` 
+
+<a name="nested-indiv"/>
+
+### Group-specific condition effects, individuals nested within groups
+
+Finally, there is a case where we *can* in fact perform inference, but
+we may need to re-arrange terms to do so. Consider an experiment with
+grouped individuals, where we seek to test the group-specific effect
+of a condition or treatment, while controlling for individual
+effects. The individuals are nested within the groups: an individual
+can only be in one of the groups, although each individual has one or
+more observations across condition.
+
+An example of such an experiment is below:
+
+```{r groupeffect}
+coldata <- DataFrame(grp=factor(rep(c("X","Y"),each=6)),
+                     ind=factor(rep(1:6,each=2)),
+                     cnd=factor(rep(c("A","B"),6)))
+coldata
+```
+
+Note that individual (`ind`) is a *factor*, not a numeric. This is very
+important. 
+
+To make R display all the rows, we can do:
+
+```{r}
+as.data.frame(coldata)
+```
+
+We have two groups of samples X and Y, each with three distinct
+individuals (labeled here 1-6). For each individual, we have
+conditions A and B (for example, this could be control and treated).
+
+This design can be analyzed by DESeq2 but requires a bit of
+refactoring in order to fit the model terms. Here we will use a trick
+described in the [edgeR](http://bioconductor.org/packages/edgeR) user
+guide, from the section 
+*Comparisons Both Between and Within Subjects*.  If we try to
+analyze with a formula such as `~ ind + grp*cnd`, we will
+obtain an error, because the effect for group is a linear combination
+of the individuals.
+
+However, the following steps allow for an analysis of group-specific
+condition effects, while controlling for differences in individual.
+For object construction, you can use a simple design, such as 
+`~ ind + cnd`, as
+long as you remember to replace it before running *DESeq*.
+Then add a column `ind.n` which distinguishes the
+individuals nested within a group. Here, we add this column to
+coldata, but in practice you would add this column to `dds`.
+
+```{r groupeffect2}
+coldata$ind.n <- factor(rep(rep(1:3,each=2),2))
+as.data.frame(coldata)
+``` 
+
+Now we can reassign our *DESeqDataSet* a design of
+`~ grp + grp:ind.n + grp:cnd`, before we call
+*DESeq*. This new design will result in the following model
+matrix: 
+
+```{r groupeffect3}
+model.matrix(~ grp + grp:ind.n + grp:cnd, coldata)
+``` 
+
+Note that, if you have unbalanced numbers of individuals in the two
+groups, you will have zeros for some of the interactions between `grp`
+and `ind.n`. You can remove these columns manually from the model
+matrix and pass the corrected model matrix to the `full` argument of
+the *DESeq* function. See example code in the next section.
+
+Above, the terms `grpX.cndB` and `grpY.cndB` give the
+group-specific condition effects, in other words, the condition B vs A
+effect for group X samples, and likewise for group Y samples. These
+terms control for all of the six individual effects.
+These group-specific condition effects can be extracted using
+*results* with the `name` argument. 
+
+Furthermore, `grpX.cndB` and `grpY.cndB` can be contrasted using the
+`contrast` argument, in order to test if the condition effect is
+different across the groups:
+
+```{r groupeffect4, eval=FALSE}
+results(dds, contrast=list("grpY.cndB","grpX.cndB"))
+``` 
+
+### Levels without samples
+
+The base R function for creating model matrices will produce a column
+of zeros if a level is missing from a factor or a combination of
+levels is missing from an interaction of factors. The solution to the
+first case is to call *droplevels* on the column, which will
+remove levels without samples. This was shown in the beginning of this
+vignette.
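+
+A minimal sketch of this first case, assuming a factor column
+`condition` with an unused level:
+
+```{r droplevelsSketch, eval=FALSE}
+dds$condition <- droplevels(dds$condition)
+```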
+
+The second case is also solvable, by manually editing the model
+matrix, and then providing this to *DESeq*. Here we
+construct an example dataset to illustrate:
+
+```{r missingcombo}
+group <- factor(rep(1:3,each=6))
+condition <- factor(rep(rep(c("A","B","C"),each=2),3))
+d <- DataFrame(group, condition)[-c(17,18),]
+as.data.frame(d)
+``` 
+
+Note that if we try to estimate all interaction terms, we introduce a
+column with all zeros, as there are no condition C samples for group
+3. (Here, *unname* is used to display the matrix concisely.)
+
+```{r missingcombo2}
+m1 <- model.matrix(~ condition*group, d)
+colnames(m1)
+unname(m1)
+all.zero <- apply(m1, 2, function(x) all(x==0))
+all.zero
+``` 
+
+We can remove this column like so:
+
+```{r missingcombo3}
+idx <- which(all.zero)
+m1 <- m1[,-idx]
+unname(m1)
+``` 
+
+Now this matrix `m1` can be provided to the `full`
+argument of *DESeq*.  For a likelihood ratio test of
+interactions, a model matrix using a reduced design such as
+`~ condition + group` can be given to the `reduced`
+argument. Wald tests can also be generated instead of the likelihood
+ratio test, but for user-supplied model matrices, the argument
+`betaPrior` must be set to `FALSE`.
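+
+A sketch of such an LRT, assuming a *DESeqDataSet* `ddsMissing` built
+from count data with the sample information `d` above:
+
+```{r customMatrixLRT, eval=FALSE}
+m2 <- model.matrix(~ condition + group, d)
+ddsMissing <- DESeq(ddsMissing, full=m1, reduced=m2,
+                    test="LRT", betaPrior=FALSE)
+```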
+
+<a name="theory"/>
+
+# Theory behind DESeq2
+
+## The DESeq2 model 
+
+The DESeq2 model and all the steps taken in the software
+are described in detail in our publication [@Love2014],
+and we include the formula and descriptions in this section as well.
+The differential expression analysis in DESeq2 uses a generalized
+linear model of the form:
+
+$$ K_{ij} \sim \textrm{NB}(\mu_{ij}, \alpha_i) $$
+
+$$ \mu_{ij} = s_j q_{ij} $$
+
+$$ \log_2(q_{ij}) = x_{j.} \beta_i $$
+
+where counts $K_{ij}$ for gene *i*, sample *j* are modeled using
+a negative binomial distribution with fitted mean $\mu_{ij}$
+and a gene-specific dispersion parameter $\alpha_i$.
+The fitted mean is composed of a sample-specific size factor
+$s_j$ and a parameter $q_{ij}$ 
+proportional to the expected true concentration of fragments for sample *j*.
+The coefficients $\beta_i$ give the log2 fold changes for gene *i* for each 
+column of the model matrix $X$. 
+Note that the model can be generalized to use sample- and
+gene-dependent normalization factors $s_{ij}$. 
+
+The dispersion parameter $\alpha_i$ defines the relationship between
+the variance of the observed count and its mean value. In other
+words, it determines how far we expect the observed count to be from the
+mean value, which depends both on the size factor $s_j$ and the
+covariate-dependent part $q_{ij}$ as defined above.
+
+$$ \textrm{Var}(K_{ij}) = E[ (K_{ij} - \mu_{ij})^2 ] = \mu_{ij} + \alpha_i \mu_{ij}^2 $$
+
+An option in DESeq2 is to provide maximum *a posteriori*
+estimates of the log2 fold changes in $\beta_i$ after incorporating a 
+zero-centered Normal prior (`betaPrior`). While previously,
+these moderated, or shrunken, estimates were generated by
+the *DESeq* or *nbinomWaldTest* functions, they are now produced by the
+*lfcShrink* function.
+Dispersions are estimated using expected mean values from the maximum
+likelihood estimate of log2 fold changes, and optimizing the Cox-Reid 
+adjusted profile likelihood, as first implemented for RNA-seq data in
+[edgeR](http://bioconductor.org/packages/edgeR) 
+[@CR; @edgeR_GLM]. The steps performed by the *DESeq* function are
+documented in its manual page `?DESeq`; briefly, they are:
+
+1) estimation of size factors $s_j$ by *estimateSizeFactors*
+2) estimation of dispersion $\alpha_i$ by *estimateDispersions*
+3) negative binomial GLM fitting for $\beta_i$ and Wald statistics by 
+*nbinomWaldTest*
+
+For access to all the values calculated during these steps, see the
+section [above](#access).
+
+## Changes compared to DESeq
+
+The main changes in the package *DESeq2*, compared to the (older)
+version *DESeq*, are as follows: 
+
+* *RangedSummarizedExperiment* is used as the superclass for storage of input data,
+  intermediate calculations and results.
+* Optional, maximum *a posteriori* estimation of GLM coefficients
+  incorporating a zero-centered Normal prior with variance estimated
+  from data (equivalent to Tikhonov/ridge regularization). This
+  adjustment has little effect on genes with high counts, yet it helps
+  to moderate the otherwise large variance in log2 fold change
+  estimates for genes with low counts or highly variable counts.
+  These estimates are now provided by the *lfcShrink* function.
+* Maximum *a posteriori* estimation of dispersion replaces the
+  `sharingMode` options `fit-only` or `maximum` of the previous version
+  of the package. This is similar to the dispersion estimation methods of DSS [@Wu2012New].
+* All estimation and inference is based on the generalized linear model, which
+  includes the two condition case (previously the *exact test* was used).
+* The Wald test for significance of GLM coefficients is provided as the default
+  inference method, with the likelihood ratio test of the previous version still available.
+* It is possible to provide a matrix of sample-/gene-dependent
+  normalization factors.
+* Automatic independent filtering on the mean of normalized counts.
+* Automatic outlier detection and handling.
+
+<a name="changes"/>
+
+## Methods changes since the 2014 DESeq2 paper
+
+* In version 1.16 (November 2016), the log2 fold change 
+  shrinkage is no longer default for the *DESeq* and *nbinomWaldTest*
+  functions, by setting the defaults of these to `betaPrior=FALSE`,
+  and by introducing a separate function *lfcShrink*, which performs
+  log2 fold change shrinkage for visualization and ranking of genes.
+  While for the majority of bulk RNA-seq experiments, the LFC
+  shrinkage did not affect statistical testing, DESeq2 has become used
+  as an inference engine by a wider community, and certain sequencing
+  datasets show better performance with the testing separated from the
+  use of the LFC prior. Also, the separation of LFC shrinkage to a separate
+  function *lfcShrink* allows for easier methods development of
+  alternative effect size estimators.
+* A small change to the independent filtering routine: instead
+  of taking the quantile of the filter (the mean of normalized counts) which
+  directly *maximizes* the number of rejections, the threshold chosen is 
+  the lowest quantile of the filter for which the
+  number of rejections is close to the peak of a curve fit
+  to the number of rejections over the filter quantiles.
+  "Close to" is defined as within 1 residual standard deviation.
+  This change was introduced in version 1.10 (October 2015).
+* For the calculation of the beta prior variance, instead of
+  matching the empirical quantile to the quantile of a Normal
+  distribution, DESeq2 now uses the weighted quantile function
+  of the [Hmisc](https://cran.r-project.org/package=Hmisc) package. The weighting is described in the
+  manual page for *nbinomWaldTest*.  The weights are the
+  inverse of the expected variance of log counts (as used in the
+  diagonals of the matrix $W$ in the GLM). The effect of the change
+  is that the estimated prior variance is robust against noisy
+  estimates of log fold change from genes with very small
+  counts. This change was introduced in version 1.6 (October 2014).
+
+For a list of all changes since version 1.0.0, see the `NEWS` file
+included in the package.
+
+## Count outlier detection 
+
+DESeq2 relies on the negative binomial distribution to make
+estimates and perform statistical inference on differences.  While the
+negative binomial is versatile in having a mean and dispersion
+parameter, extreme counts in individual samples might not fit well to
+the negative binomial. For this reason, we perform automatic detection
+of count outliers. We use Cook's distance, which is a measure of how
+much the fitted coefficients would change if an individual sample were
+removed [@Cook1977Detection]. For more on the implementation of 
+Cook's distance see the manual page
+for the *results* function. Below we plot the maximum value of
+Cook's distance for each row over the rank of the test statistic 
+to justify its use as a filtering criterion.
+
+```{r cooksPlot}
+W <- res$stat
+maxCooks <- apply(assays(dds)[["cooks"]],1,max)
+idx <- !is.na(W)
+plot(rank(W[idx]), maxCooks[idx], xlab="rank of Wald statistic", 
+     ylab="maximum Cook's distance per gene",
+     ylim=c(0,5), cex=.4, col=rgb(0,0,0,.3))
+m <- ncol(dds)
+p <- 3
+abline(h=qf(.99, p, m - p))
+``` 
+
+## Contrasts 
+
+Contrasts can be calculated for a *DESeqDataSet* object for which
+the GLM coefficients have already been fit using the Wald test steps
+(*DESeq* with `test="Wald"` or using *nbinomWaldTest*).
+The vector of coefficients $\beta$ is left multiplied by the contrast vector $c$
+to form the numerator of the test statistic. The denominator is formed by multiplying
+the covariance matrix $\Sigma$ for the coefficients on either side by the 
+contrast vector $c$. The square root of this product is an estimate
+of the standard error for the contrast. The contrast statistic is then compared
+to a normal distribution as are the Wald statistics for the DESeq2
+package.
+
+$$ W = \frac{c^t \beta}{\sqrt{c^t \Sigma c}} $$
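+
+As an illustration only (DESeq2 computes this internally; the objects
+below are stand-ins, not values exported by the package), the
+statistic for a single gene would be computed as:
+
+```{r waldContrastSketch, eval=FALSE}
+# beta: vector of fitted coefficients for one gene
+# Sigma: their covariance matrix; cvec: the contrast vector c
+W <- as.numeric(t(cvec) %*% beta) /
+  sqrt(as.numeric(t(cvec) %*% Sigma %*% cvec))
+pvalue <- 2 * pnorm(-abs(W))  # compared to a standard normal
+```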
+
+## Expanded model matrices 
+
+DESeq2 uses *expanded model matrices* in conjunction with the log2
+fold change prior, in order to produce shrunken log2 fold change estimates and test 
+results which are independent of the choice of reference level. 
+Another way of saying this is that the shrinkage is *symmetric*
+with respect to all the levels of the factors in the design.
+The expanded model matrices differ from the standard model matrices, in that
+they have an indicator column (and therefore a coefficient) for
+each level of factors in the design formula in addition to an intercept. 
+Note that in version 1.10 and onward, standard model matrices are used for
+designs with interaction terms, as the shrinkage of log2 fold changes
+is not recommended for these designs.
+
+The expanded model matrices are not full rank, but a coefficient
+vector $\beta_i$ can still be found due to the zero-centered prior on
+non-intercept coefficients. The prior variance for the log2 fold
+changes is calculated by first generating maximum likelihood estimates
+for a standard model matrix. The prior variance for each level of a
+factor is then set as the average of the mean squared maximum
+likelihood estimates for each level and every possible contrast, such
+that this prior value will be reference-level-independent. The
+`contrast` argument of the *results* function is
+used in order to generate comparisons of interest.
+
+<a name="indfilttheory"/>
+
+## Independent filtering and multiple testing 
+
+### Filtering criteria 
+
+The goal of independent filtering is to filter out those tests from
+the procedure that have little or no chance of showing significant
+evidence, without even looking at their test statistic. Typically,
+this results in increased detection power at the same experiment-wide
+type I error. Here, we measure experiment-wide type I error in terms
+of the false discovery rate.
+
+A good choice for a filtering criterion is one that
+
+1) is statistically independent from the test statistic under the null hypothesis,
+2) is correlated with the test statistic under the alternative, and
+3) does not notably change the dependence structure -- if there is any -- between 
+   the tests that pass the filter, compared to the dependence structure
+   between the tests before filtering.
+
+The benefit from filtering relies on property (2), and we will explore
+it further below. Its statistical validity relies on
+property (1) -- which is simple to formally prove for many combinations
+of filter criteria with test statistics -- and (3), which is less
+easy to establish theoretically from first principles, but is rarely a problem in practice.
+We refer to [@Bourgon:2010:PNAS] for further discussion of this topic.
+
+A simple filtering criterion readily available in the results object
+is the mean of normalized counts irrespective of biological condition,
+and so this is the criterion which is used automatically by the
+*results* function to perform independent filtering.  Genes with very
+low counts are unlikely to show significant differences, typically due
+to their high dispersion. For example, we can plot the $-\log_{10}$ *p*
+values from all genes over the normalized mean counts:
+
+```{r indFilt}
+plot(res$baseMean+1, -log10(res$pvalue),
+     log="x", xlab="mean of normalized counts",
+     ylab=expression(-log[10](pvalue)),
+     ylim=c(0,30),
+     cex=.4, col=rgb(0,0,0,.3))
+```
+
+### Why does it work?
+
+Consider the *p* value histogram below.
+It shows how the filtering ameliorates the multiple testing problem
+-- and thus the severity of a multiple testing adjustment -- by
+removing a background set of hypotheses whose *p* values are distributed
+more or less uniformly in [0,1].
+
+```{r histindepfilt}
+use <- res$baseMean > metadata(res)$filterThreshold
+h1 <- hist(res$pvalue[!use], breaks=0:50/50, plot=FALSE)
+h2 <- hist(res$pvalue[use], breaks=0:50/50, plot=FALSE)
+colori <- c(`do not pass`="khaki", `pass`="powderblue")
+``` 
+
+Histogram of p values for all tests.  The area shaded in blue
+indicates the subset of those that pass the filtering, the area in
+khaki those that do not pass: 
+
+```{r fighistindepfilt}
+barplot(height = rbind(h1$counts, h2$counts), beside = FALSE,
+        col = colori, space = 0, main = "", ylab="frequency")
+text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)),
+     adj = c(0.5,1.7), xpd=NA)
+legend("topright", fill=rev(colori), legend=rev(names(colori)))
+```
+
+<a name="FAQ"/>
+
+# Frequently asked questions 
+
+## How can I get support for DESeq2?
+
+We welcome questions about our software, and want to
+ensure that we eliminate issues if and when they appear. We have a few
+requests to optimize the process:
+
+* all questions should take place on the Bioconductor support
+  site: <https://support.bioconductor.org>, which serves as a
+  repository of questions and answers. This helps to save the
+  developers' time in responding to similar questions. Make sure to
+  tag your post with `deseq2`. In addition, it is often very helpful
+  to describe the aim of your experiment.
+* before posting, first search the Bioconductor support site
+  mentioned above for past threads which might have answered your
+  question.
+* if you have a question about the behavior of a function, read
+  the sections of the manual page for this function by typing a
+  question mark and the function name, e.g. `?results`.  We
+  spend a lot of time documenting individual functions and the exact
+  steps that the software is performing.
+* include all of your R code, especially the creation of the
+  *DESeqDataSet* and the design formula.  Include complete
+  warning or error messages, and conclude your message with the full
+  output of `sessionInfo()`.
+* if possible, include the output of
+  `as.data.frame(colData(dds))`, so that we can have a sense
+  of the experimental setup. If this contains confidential
+  information, you can replace the levels of those factors using
+  *levels()*.
+
+
+## Why are some *p* values set to NA?
+  
+See the details [above](#pvaluesNA).
+
+## How can I get unfiltered DESeq2 results?
+
+Users can obtain unfiltered GLM results, i.e. without outlier removal
+or independent filtering, with the following call:
+
+```{r vanillaDESeq, eval=FALSE}
+dds <- DESeq(dds, minReplicatesForReplace=Inf)
+res <- results(dds, cooksCutoff=FALSE, independentFiltering=FALSE)
+```
+
+In this case, the only *p* values set to `NA` are those from
+genes with all counts equal to zero.
+
+## How do I use VST or rlog data for differential testing?
+  
+The variance stabilizing and rlog transformations are provided for
+applications other than differential testing, for example clustering
+of samples or other machine learning applications. For differential
+testing we recommend the *DESeq* function applied to raw
+counts as outlined [above](#de).
+  
+## Can I use DESeq2 to analyze paired samples?
+
+Yes, you should use a multi-factor design which includes the sample
+information as a term in the design formula. This will account for 
+differences between the samples while estimating the effect due to 
+the condition. The condition of interest should go at the end of the 
+design formula, e.g. `~ subject + condition`.
+
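+A minimal sketch, assuming hypothetical columns `subject` and
+`condition` in the column data:
+
+```{r pairedDesign, eval=FALSE}
+design(dds) <- ~ subject + condition
+dds <- DESeq(dds)
+res <- results(dds)  # the condition effect, controlling for subject
+```
+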
+## If I have multiple groups, should I run all together or split into pairs of groups?
+
+Typically, we recommend users to run samples from all groups together, and then
+use the `contrast` argument of the *results* function
+to extract comparisons of interest after fitting the model using *DESeq*.
+
+The model fit by *DESeq* estimates a single dispersion
+parameter for each gene, which defines how far we expect the observed
+count for a sample to be from the mean value predicted by the model,
+given the sample's size factor and its condition group. See the section
+[above](#theory) and the DESeq2 paper for full details.
+Having a single dispersion parameter for each gene is usually
+sufficient for analyzing multi-group data, as the final dispersion value will
+incorporate the within-group variability across all groups. 
+
+However, for some datasets, exploratory data analysis (EDA) plots
+could reveal that one or more groups has much 
+higher within-group variability than the others. A simulated example
+of such a set of samples is shown below.
+This is a case where comparing groups A and B separately --
+subsetting a *DESeqDataSet* to only the samples from those two
+groups and then running *DESeq* on this subset -- will be
+more sensitive than fitting a model to all samples together.
+It should be noted that such an extreme range of within-group
+variability is not common, although it could arise if certain
+treatments produce an extreme reaction (e.g. cell death).
+Again, this can easily be detected from EDA plots such as the PCA
+plot described in this vignette.
+
+Here we diagram an extreme range of within-group variability with a
+simulated dataset. Typically, it is recommended to run *DESeq* across
+samples from all groups, for datasets with multiple groups. However,
+this simulated dataset shows a case where it would be preferable to
+compare groups A and B by creating a smaller dataset without the C
+samples. Group C has much higher within-group variability, which would
+inflate the per-gene dispersion estimate for groups A and B as well:
+
+```{r varGroup, echo=FALSE}
+set.seed(3)
+dds1 <- makeExampleDESeqDataSet(n=1000,m=12,betaSD=.3,dispMeanRel=function(x) 0.01)
+dds2 <- makeExampleDESeqDataSet(n=1000,m=12,
+                                betaSD=.3,
+                                interceptMean=mcols(dds1)$trueIntercept,
+                                interceptSD=0,
+                                dispMeanRel=function(x) 0.2)
+dds2 <- dds2[,7:12]
+dds2$condition <- rep("C",6)
+mcols(dds2) <- NULL
+dds <- cbind(dds1, dds2)
+rld <- rlog(dds, blind=FALSE, fitType="mean")
+plotPCA(rld)
+``` 
+
+## Can I run DESeq2 to contrast the levels of many groups?
+
+DESeq2 will work with any kind of design specified using the R
+formula. We encourage users to consider exploratory data analysis such
+as principal components analysis rather than performing statistical
+testing of all pairs of many groups of samples. Statistical testing is
+one of many ways of describing differences between samples.
+
+Regarding multiple test correction, if a user is planning to
+contrast all pairs of many levels, and then to selectively report the
+results of only a *subset* of those pairs, one needs to perform multiple testing
+correction across *contrasts* as well as genes to control for this additional
+form of multiple testing. This can be done by applying the `p.adjust`
+function to a long vector of *p* values from all pairs of
+contrasts, then re-assigning these adjusted *p* values to the
+appropriate results tables, as sketched below.
+
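+A minimal sketch, assuming three hypothetical results tables `res1`,
+`res2` and `res3` from pairwise contrasts over the same genes:
+
+```{r padjAcrossContrasts, eval=FALSE}
+pvals <- c(res1$pvalue, res2$pvalue, res3$pvalue)
+padj <- p.adjust(pvals, method="BH")  # adjust across genes and contrasts
+n <- nrow(res1)
+res1$padj <- padj[seq_len(n)]
+res2$padj <- padj[n + seq_len(n)]
+res3$padj <- padj[2*n + seq_len(n)]
+```
+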
+Regarding the speed of fitting very large models,
+note that each additional level of a factor in the
+design formula adds another parameter to the GLM which is fit by
+DESeq2. Users might consider first removing genes with very few
+reads, e.g. genes with a row sum of 0 or 1, as this will speed up the
+fitting procedure.
+
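+For example, a minimal pre-filter removing rows with a count sum of 0 or 1:
+
+```{r prefilterSpeed, eval=FALSE}
+dds <- dds[ rowSums(counts(dds)) > 1, ]
+```
+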
+## Can I use DESeq2 to analyze a dataset without replicates?
+
+If a *DESeqDataSet* is provided with an experimental design without replicates,
+a warning is printed that the samples will be treated as replicates
+for estimation of dispersion. This kind of analysis is
+only useful for exploring the data, but cannot provide
+proper statistical inference on differences between groups.
+Without biological replicates, it is not possible to estimate the biological
+variability of each gene. 
+More details can be found in the manual page for `?DESeq`.
+
+## How can I include a continuous covariate in the design formula?
+
+Continuous covariates can be included in the design formula in exactly
+the same manner as factorial covariates, and then *results* for the
+continuous covariate can be extracted by specifying `name`.
+Continuous covariates might make sense in certain experiments, where a
+constant fold change might be 
+expected for each unit of the covariate.  However, in many cases, more
+meaningful results can be obtained by cutting continuous covariates
+into a factor defined over a small number of bins (e.g. 3-5).  In this
+way, the average effect of each group is controlled for, regardless of
+the trend over the continuous covariates.  In R, *numeric*
+vectors can be converted into *factors* using the function *cut*.
+
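+A minimal sketch, assuming a hypothetical numeric column `age` in the
+column data:
+
+```{r cutCovariate, eval=FALSE}
+dds$ageBin <- cut(dds$age, breaks=3)  # 'age' is a hypothetical covariate
+design(dds) <- ~ ageBin + condition
+```
+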
+## I ran a likelihood ratio test, but results() only gives me one comparison.
+
+"... How do I get the *p* values for all of the variables/levels 
+that were removed in the reduced design?"
+
+This is explained in the help page for `?results` in the
+section about likelihood ratio test p-values, but we will restate the
+answer here. When one performs a likelihood ratio test, the *p* values and
+the test statistic (the `stat` column) are values for the test
+that removes all of the variables which are present in the full
+design and not in the reduced design. This tests the null hypothesis
+that all the coefficients from these variables and levels of these factors
+are equal to zero.
+
+The likelihood ratio test *p* values therefore
+represent a test of *all the variables and all the levels of factors*
+which are among these variables. However, the results table only has space for
+one column of log fold change, so a single variable and a single
+comparison is shown (among the potentially multiple log fold changes
+which were tested in the likelihood ratio test). 
+This is indicated at the top of the results table
+with the text, e.g., log2 fold change (MLE): condition C vs A, followed
+by, LRT p-value: '~ batch + condition' vs '~ batch'.
+This indicates that the *p* value is for the likelihood ratio test of
+*all the variables and all the levels*, while the log fold change is a single
+comparison from among those variables and levels.
+See the help page for *results* for more details.
+
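+For example, with the design mentioned above, the likelihood ratio test
+comparing the full design to a reduced design without `condition` would be:
+
+```{r lrtExample, eval=FALSE}
+dds <- DESeq(dds, test="LRT", full=~ batch + condition, reduced=~ batch)
+res <- results(dds)
+```
+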
+## What are the exact steps performed by DESeq()?
+
+See the manual page for *DESeq*, which links to the 
+subfunctions which are called in order, where complete details are
+listed. You can also read the three steps listed in
+[the DESeq2 model](#theory) section of this document.
+
+
+## Is there an official Galaxy tool for DESeq2?
+
+Yes. The repository for the DESeq2 tool is
+
+<https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2> 
+
+and a link to its location in the Tool Shed is 
+
+<https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab>.
+
+## I want to benchmark DESeq2 comparing to other DE tools.
+
+One aspect which can cause problems for comparison is that, by default,
+DESeq2 outputs `NA` values for adjusted *p* values based on 
+independent filtering of genes which have low counts.
+This is a way for DESeq2 to give extra
+information on why the adjusted *p* value for this gene is not small.
+Additionally, *p* values can be set to `NA` based on extreme 
+count outlier detection. These `NA` values should be considered
+*negatives* for purposes of estimating sensitivity and specificity. The
+easiest way to work with the adjusted *p* values in a benchmarking
+context is probably to convert these `NA` values to 1:
+
+```{r convertNA, eval=FALSE}
+res$padj <- ifelse(is.na(res$padj), 1, res$padj)
+``` 
+
+## I have trouble installing DESeq2 on Ubuntu/Linux...
+
+"*I try to install DESeq2 using biocLite(), but I get an error trying to
+install the R packages XML and/or RCurl:*"
+
+`ERROR: configuration failed for package XML`
+
+`ERROR: configuration failed for package RCurl`
+
+You need to install the following development packages using
+your standard package manager, e.g. `sudo apt-get install` or
+`sudo apt install`:
+
+* libxml2-dev
+* libcurl4-openssl-dev
+
+# Acknowledgments
+
+We have benefited in the development of DESeq2 from the help and
+feedback of many individuals, including but not limited to: 
+
+The Bioconductor Core Team,
+Alejandro Reyes, Andrzej Oles, Aleksandra Pekowska, Felix Klein,
+Nikolaos Ignatiadis,
+Vince Carey,
+Owen Solberg,
+Ruping Sun,
+Devon Ryan, 
+Steve Lianoglou, Jessica Larson, Christina Chaivorapol, Pan Du, Richard Bourgon,
+Willem Talloen, 
+Elin Videvall, Hanneke van Deutekom,
+Todd Burwell, 
+Jesse Rowley,
+Igor Dolgalev,
+Stephen Turner,
+Ryan C Thompson,
+Tyr Wiesner-Hanks,
+Konrad Rudolph,
+David Robinson,
+Mingxiang Teng,
+Mathias Lesche,
+Sonali Arora,
+Jordan Ramilowski,
+Ian Dworkin,
+Bjorn Gruning,
+Ryan McMinds,
+Paul Gordon,
+Leonardo Collado Torres,
+Enrico Ferrero.
+
+# Session info
+
+```{r sessionInfo}
+sessionInfo()
+```
+
+# References
+
diff --git a/vignettes/DESeq2.Rnw b/vignettes/DESeq2.Rnw
deleted file mode 100644
index 6e5e43b..0000000
--- a/vignettes/DESeq2.Rnw
+++ /dev/null
@@ -1,2414 +0,0 @@
-%\VignetteIndexEntry{Analyzing RNA-seq data with the "DESeq2" package}
-%\VignettePackage{DESeq2}
-%\VignetteEngine{knitr::knitr}
-
-% To compile this document
-% library('knitr'); rm(list=ls()); knit('DESeq2.Rnw')
-
-\documentclass{article}
-
-<<style, eval=TRUE, echo=FALSE, results="asis">>=
-BiocStyle::latex2()
-@
-
-\usepackage{subfig}% for combining multiple plots in one figure
-
-\newcommand{\deseqtwo}{\textit{DESeq2}}
-\newcommand{\lowtilde}{\raise.17ex\hbox{$\scriptstyle\mathtt{\sim}$}}
-
-<<knitr, echo=FALSE, results="hide">>=
-library("knitr")
-opts_chunk$set(
-  tidy=FALSE,
-  dev="png",
-  fig.show="hide",
-  fig.width=4, fig.height=4.5,
-  fig.pos="tbh",
-  cache=TRUE,
-  message=FALSE)
-@ 
-
-<<loadDESeq2, echo=FALSE>>=
-library("DESeq2")
-@
-
-\author{Michael I.~Love}
-\affil{Department of Biostatistics, Dana-Farber Cancer Institute and Harvard TH Chan School of Public Health, Boston, US;}
-
-\author{Simon Anders}
-\affil{Institute for Molecular Medicine Finland (FIMM), Helsinki, Finland;}
-
-\author{Wolfgang Huber}
-\affil{European Molecular Biology Laboratory (EMBL), Heidelberg, Germany}
-
-\title{Differential analysis of count data -- the DESeq2 package}
-
-\begin{document}
-
-\maketitle
-
-\begin{abstract}
-  A basic task in the analysis of count data from RNA-seq is the detection of
-  differentially expressed genes. The count data are presented as a table which reports,
-  for each sample, the number of sequence fragments that have been assigned to each
-  gene. Analogous data also arise for other assay types, including comparative ChIP-Seq,
-  HiC, shRNA screening, and mass spectrometry.  An important analysis question is the
-  quantification and statistical inference of systematic changes between conditions, as
-  compared to within-condition variability. The package \deseqtwo{} provides
-  methods to test for differential expression by use of negative binomial generalized
-  linear models; the estimates of dispersion and logarithmic fold changes 
-  incorporate data-driven prior distributions\footnote{Other \Bioconductor{} packages 
-  with similar aims are \Biocpkg{edgeR}, \Biocpkg{limma},
-  \Biocpkg{DSS}, \Biocpkg{EBSeq} and \Biocpkg{baySeq}.}. 
-  This vignette explains the use of the package and demonstrates typical workflows.  
-  An RNA-seq workflow\footnote{\url{http://www.bioconductor.org/help/workflows/rnaseqGene/}} 
-  on the Bioconductor website covers similar material to this vignette
-  but at a slower pace, including the generation of count matrices
-  from FASTQ files.
-\end{abstract}
-
-\packageVersion{\Sexpr{BiocStyle::pkg_ver("DESeq2")}}
-
- \vspace{5mm}
-  
-  \begin{table}
-    \begin{tabular}{ | l | }
-      \hline 
-      If you use \deseqtwo{} in published research, please cite:  \\
-      \\
-      M. I. Love, W. Huber, S. Anders: \textbf{Moderated estimation of} \\
-      \textbf{fold change and dispersion for RNA-seq data with DESeq2}. \\
-      \emph{Genome Biology} 2014, \textbf{15}:550. \\
-      \url{http://dx.doi.org/10.1186/s13059-014-0550-8}  \\
-      \hline 
-    \end{tabular}
-  \end{table}
-
-
-<<options, results="hide", echo=FALSE>>=
-options(digits=3, prompt=" ", continue=" ")
-@
-
-\newpage
-
-\tableofcontents
-
-\newpage
-
-\section{Standard workflow}
-
-\subsection{Quick start}
-
-Here we show the most basic steps for a differential expression analysis.
-These steps require that you have a \Rclass{RangedSummarizedExperiment} object
-\Robject{se} which contains the counts and information about samples.
-The \Robject{design} indicates that we want to
-measure the effect of condition, controlling for batch differences.
-The two factor variables \Robject{batch} and \Robject{condition} 
-should be columns of \Robject{colData(se)}.
-
-<<quick, eval=FALSE>>=
-dds <- DESeqDataSet(se, design = ~ batch + condition)
-dds <- DESeq(dds)
-res <- results(dds, contrast=c("condition","trt","con"))
-@
-
-If you have a count matrix and sample information table, the first
-line would use \Rfunction{DESeqDataSetFromMatrix} instead of 
-\Rfunction{DESeqDataSet}, as shown in Section~\ref{sec:countmat}.
-
-\subsection{How to get help}
-
-All \deseqtwo{} questions should be posted to the Bioconductor support
-site: \url{https://support.bioconductor.org}, which serves as a
-repository of questions and answers. See the first question in the
-list of Frequently Asked Questions (Section \ref{sec:faq})
-for more information about how to construct an informative post.
-
-\subsection{Input data} \label{sec:prep}
-
-\subsubsection{Why un-normalized counts?}
-
-As input, the \deseqtwo{} package expects count data as obtained, e.\,g.,
-from RNA-seq or another high-throughput sequencing experiment, in the form of a
-matrix of integer values. The value in the $i$-th row and the $j$-th column of
-the matrix tells how many reads can be assigned to gene $i$ in sample $j$.
-Analogously, for other types of assays, the rows of the matrix might correspond
-e.\,g.\ to binding regions (with ChIP-Seq) or peptide sequences (with
-quantitative mass spectrometry). We will list methods for obtaining count matrices
-in sections below.
-
-The values in the matrix should be un-normalized counts of sequencing reads (for
-single-end RNA-seq) or fragments (for paired-end RNA-seq). 
-The \href{http://www.bioconductor.org/help/workflows/rnaseqGene/}{RNA-seq workflow}
-describes multiple techniques for preparing such count matrices.
-It is important to provide count matrices as input so that \deseqtwo{}'s
-statistical model \cite{Love2014} holds, as only the
-count values allow assessing the measurement precision correctly. The \deseqtwo{}
-model internally corrects for library size, so transformed or
-normalized values such as counts scaled by library size should not
-be used as input. 
-
-\subsubsection{\Rclass{SummarizedExperiment} input} \label{sec:sumExpInput}
-
-The class used by the \deseqtwo{} package to store the read counts 
-is \Rclass{DESeqDataSet} which extends the \Rclass{RangedSummarizedExperiment} 
-class of the \Biocpkg{SummarizedExperiment} package. 
-This facilitates preparation steps and also downstream exploration of results. 
-For counting aligned reads in genes, the \Rfunction{summarizeOverlaps} function of
-\Biocpkg{GenomicAlignments} with \Robject{mode="Union"} is
-encouraged, resulting in a \Rclass{RangedSummarizedExperiment} object.
-Other methods for obtaining count matrices are described in the next section.
-
-An example of the steps to produce a \Rclass{RangedSummarizedExperiment} can
-be found in an RNA-seq workflow on the Bioconductor 
-website: \url{http://www.bioconductor.org/help/workflows/rnaseqGene/}
-and in the vignette for the data package \Biocexptpkg{airway}.
-Here we load the \Rclass{RangedSummarizedExperiment} from that package in
-order to build a \Rclass{DESeqDataSet}.
-
-<<loadSumExp>>=
-library("airway")
-data("airway")
-se <- airway
-@
-
-A \Rclass{DESeqDataSet} object must have an associated design formula.  
-The design formula expresses the variables which will be
-used in modeling. The formula should be a tilde ($\sim$) followed by the
-variables with plus signs between them (it will be coerced into a
-\Rclass{formula} if it is not already).  An intercept is included,
-representing the base mean of counts. The design can be changed later,
-but then all differential analysis steps should be repeated,
-as the design formula is used to estimate the dispersions and 
-to estimate the log2 fold changes of the model. 
-The constructor function below shows the generation of a
-\Rclass{DESeqDataSet} from a \Rclass{RangedSummarizedExperiment} \Robject{se}. 
-
-\emph{Note}: In order to benefit from the default settings of the
-package, you should put the variable of interest at the end of the
-formula and make sure the control level is the first level.
-
-<<sumExpInput>>=
-library("DESeq2")
-ddsSE <- DESeqDataSet(se, design = ~ cell + dex)
-ddsSE
-@
-
-\subsubsection{Count matrix input} \label{sec:countmat}
-
-Alternatively, the function \Rfunction{DESeqDataSetFromMatrix} can be
-used if you already have a matrix of read counts prepared from another
-source. Another method for quickly producing count matrices 
-from alignment files is the \Rfunction{featureCounts} function
-in the \Biocpkg{Rsubread} package.
-To use \Rfunction{DESeqDataSetFromMatrix}, the user should provide 
-the counts matrix, the information about the samples (the columns of the 
-count matrix) as a \Rclass{DataFrame} or \Rclass{data.frame}, 
-and the design formula.
-
-To demonstrate the use of \Rfunction{DESeqDataSetFromMatrix}, 
-we will read in count data from the \Biocexptpkg{pasilla} package.
-We read in a count matrix, which we will name \Robject{countData}, 
-and the sample information table, which we will name \Robject{colData}. 
-Further below we describe how to extract 
-these objects from, e.g. \Rfunction{featureCounts} output.
-
-<<loadPasilla>>=
-library("pasilla")
-pasCts <- system.file("extdata", "pasilla_gene_counts.tsv",
-                 package="pasilla", mustWork=TRUE)
-pasAnno <- system.file("extdata", "pasilla_sample_annotation.csv",
-                       package="pasilla", mustWork=TRUE)
-countData <- as.matrix(read.csv(pasCts,sep="\t",row.names="gene_id"))
-colData <- read.csv(pasAnno, row.names=1)
-colData <- colData[,c("condition","type")]
-@ 
-
-We examine the count matrix and column data to see if they are consistent:
-
-<<showPasilla>>=
-head(countData)
-head(colData)
-@ 
-
-Note that these are not in the same order with respect to samples! 
-It is critical that the columns of the count matrix and the rows of
-the column data (information about samples) are in the same order.
-We should re-arrange one or the other so that they are consistent in
-terms of sample order (if we do not, later functions would produce
-an error). We additionally need to chop off the \Robject{"fb"} from the
-row names of \Robject{colData}, so the naming is consistent.
-
-<<reorderPasila>>=
-rownames(colData) <- sub("fb","",rownames(colData))
-all(rownames(colData) %in% colnames(countData))
-countData <- countData[, rownames(colData)]
-all(rownames(colData) == colnames(countData))
-@ 
-
-If you have used the \Rfunction{featureCounts} function in the 
-\Biocpkg{Rsubread} package, the matrix of read counts can be directly 
-provided from the \Robject{"counts"} element in the list output.
-The count matrix and column data can typically be read into R 
-from flat files using base R functions such as \Rfunction{read.csv} 
-or \Rfunction{read.delim}.
-For \textit{HTSeq} count files, see the dedicated input function below.
-
-With the count matrix, \Robject{countData}, and the sample
-information, \Robject{colData}, we can construct a \Rclass{DESeqDataSet}:
-
-<<matrixInput>>=
-dds <- DESeqDataSetFromMatrix(countData = countData,
-                              colData = colData,
-                              design = ~ condition)
-dds
-@
-
-If you have additional feature data, it can be added to the
-\Rclass{DESeqDataSet} by adding to the metadata columns of a newly
-constructed object. (Here we add redundant data just for demonstration, as
-the gene names are already the rownames of the \Robject{dds}.)
-
-<<addFeatureData>>=
-featureData <- data.frame(gene=rownames(countData))
-(mcols(dds) <- DataFrame(mcols(dds), featureData))
-@ 
-
-\subsubsection{tximport: transcript abundance summarized to gene-level}
-
-Users can create gene-level count matrices for use with \deseqtwo{}
-by importing information using the \Biocpkg{tximport} package.
-This workflow allows users to import transcript abundance estimates
-from a variety of external software, including the following methods:
-
-\begin{itemize}
-\item \href{http://www.cs.cmu.edu/~ckingsf/software/sailfish/}{Sailfish} 
-  \cite{Patro2014Sailfish}
-\item \href{http://combine-lab.github.io/salmon/}{Salmon} 
-  \cite{Patro2015Salmon}
-\item
-  \href{https://pachterlab.github.io/kallisto/about.html}{kallisto} 
-  \cite{Bray2015Near}
-\item \href{http://deweylab.github.io/RSEM/}{RSEM} 
-  \cite{Li2011RSEM}
-\end{itemize}
-
-Some advantages of using the above methods for transcript abundance
-estimation are: (i) this approach corrects for potential changes
-in gene length across samples 
-(e.g. from differential isoform usage) \cite{Trapnell2013Differential},
-(ii) some of these methods (\textit{Sailfish, Salmon, kallisto}) 
-are substantially faster and require less memory
-and disk usage compared to alignment-based methods that require
-creation and storage of BAM files, and
-(iii) it is possible to avoid discarding those fragments that can
-align to multiple genes with homologous sequence, thus increasing
-sensitivity \cite{Robert2015Errors}.
-
-Full details on the motivation and methods for importing transcript
-level abundance and count estimates, summarizing to gene-level count matrices 
-and producing an offset which corrects for potential changes in average
-transcript length across samples are described in \cite{Soneson2015}.
-The \textit{tximport}$\rightarrow$\deseqtwo{} approach uses estimated
-gene counts (rounded, but not normalized) instead of the raw count of fragments
-which can be unambiguously assigned to a gene.
-
-Here, we demonstrate how to import transcript abundances
-and construct a gene-level \Rclass{DESeqDataSet} object
-from \textit{Salmon} \texttt{quant.sf} files, which are
-stored in the \Biocexptpkg{tximportData} package.
-Note that, instead of locating \Robject{dir} using \Rfunction{system.file},
-a user would typically just provide a path, e.g. \texttt{/path/to/quant/files}.
-For further details on use of \Rfunction{tximport}, 
-including the construction of the \Robject{tx2gene} table for linking
-transcripts to genes, please refer to the \Biocpkg{tximport} package vignette. 
-
-<<tximport>>=
-library("tximport")
-library("readr")
-library("tximportData")
-dir <- system.file("extdata", package="tximportData")
-samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
-files <- file.path(dir,"salmon", samples$run, "quant.sf")
-names(files) <- paste0("sample",1:6)
-tx2gene <- read.csv(file.path(dir, "tx2gene.csv"))
-txi <- tximport(files, type="salmon", tx2gene=tx2gene, reader=read_tsv)
-@ 
-
-Next we create a condition vector to demonstrate building a
-\Robject{DESeqDataSet}. For typical use, this information would already
-be present as a column of the \Robject{samples} table.
-The best practice is to read \Robject{colData} from a CSV or TSV file, 
-and to construct \Robject{files} 
-from a column of \Robject{colData}, as shown in the code chunk above.
-
-<<txi2dds>>=
-coldata <- data.frame(condition=factor(rep(c("A","B"),each=3)))
-rownames(coldata) <- colnames(txi$counts)
-ddsTxi <- DESeqDataSetFromTximport(txi, colData=coldata,
-                                   design=~ condition)
-@
-
-The \Robject{ddsTxi} object can then be used as \Robject{dds} in the
-following analysis steps.
-
-\subsubsection{\textit{HTSeq} input}
-
-You can use the function \Rfunction{DESeqDataSetFromHTSeqCount} if you
-have \texttt{htseq-count} from the \textit{HTSeq} python  
-package\footnote{available from \url{http://www-huber.embl.de/users/anders/HTSeq}, described in \cite{Anders:2014:htseq}}.  
-For an example of using the python scripts, see the
-\Biocexptpkg{pasilla} data package. First you will want to specify a
-variable which points to the directory in which the \textit{HTSeq}
-output files are located. 
-
-<<htseqDirI, eval=FALSE>>=
-directory <- "/path/to/your/files/"
-@ 
-
-However, for demonstration purposes only, the following line of
-code points to the directory of the demo \textit{HTSeq} output
-files packaged with the \Biocexptpkg{pasilla} package.
-
-<<htseqDirII>>=
-directory <- system.file("extdata", package="pasilla", mustWork=TRUE)
-@ 
-
-We specify which files to read in using \Rfunction{list.files},
-and select those files which contain the string \Robject{"treated"} 
-using \Rfunction{grep}. The \Rfunction{sub} function is used to 
-chop up the sample filename to obtain the condition status, or 
-you might alternatively read in a phenotypic table 
-using \Rfunction{read.table}.
-
-<<htseqInput>>=
-sampleFiles <- grep("treated",list.files(directory),value=TRUE)
-sampleCondition <- sub("(.*treated).*","\\1",sampleFiles)
-sampleTable <- data.frame(sampleName = sampleFiles,
-                          fileName = sampleFiles,
-                          condition = sampleCondition)
-ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
-                                       directory = directory,
-                                       design= ~ condition)
-ddsHTSeq
-@
-
-\subsubsection{Pre-filtering}
-
-While it is not necessary to pre-filter low count genes before running the \deseqtwo{}
-functions, there are two reasons which make pre-filtering useful:
-by removing rows in which there are no reads or nearly no reads,
-we reduce the memory size of the \Robject{dds} data object and 
-we increase the speed of the transformation
-and testing functions within \deseqtwo{}. Here we perform a minimal
-pre-filtering to remove rows that have only 0 or 1 read. Note that more strict
-filtering to increase power is \textit{automatically} applied via independent filtering
-on the mean of normalized counts within the \Rfunction{results}
-function, which will be discussed in Section~\ref{sec:autoFilt}.
-
-<<prefilter>>=
-dds <- dds[ rowSums(counts(dds)) > 1, ]
-@ 
-
-\subsubsection{Note on factor levels} \label{sec:factorLevels}
-
-By default, R will choose a \textit{reference level} for factors based
-on alphabetical order. Then, if you never tell the \deseqtwo{} functions
-which level you want to compare against (e.g. which level represents
-the control group), the comparisons will be based on the alphabetical
-order of the levels. There are two solutions: you can either
-explicitly tell \Rfunction{results} which comparison to make using the
-\Robject{contrast} argument (this will be shown later), or you can
-explicitly set the factor levels. Setting the factor levels can be done in two ways,
-either using factor:
-
-<<factorlvl>>=
-dds$condition <- factor(dds$condition, levels=c("untreated","treated"))
-@ 
-
-...or using \Rfunction{relevel}, just specifying the reference level:
-
-<<relevel>>=
-dds$condition <- relevel(dds$condition, ref="untreated")
-@ 
-
-If you need to subset the columns of a \Rclass{DESeqDataSet},
-i.e., when removing certain samples from the analysis, it is possible
-that all the samples for one or more levels of a variable in the design
-formula would be removed. In this case, the \Rfunction{droplevels} function can be used
-to remove those levels which do not have samples in the current \Rclass{DESeqDataSet}:
-
-<<droplevels>>=
-dds$condition <- droplevels(dds$condition)
-@ 
-
-\subsubsection{Collapsing technical replicates}
-
-\deseqtwo{} provides a function \Rfunction{collapseReplicates} which can
-assist in combining the counts from technical replicates into single
-columns of the count matrix. The term ``technical replicate'' 
-implies multiple sequencing runs of the same library. 
-You should not collapse biological replicates using this function.
-See the manual page for an example of the use of
-\Rfunction{collapseReplicates}. 
-
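-A minimal sketch, assuming hypothetical column data variables
-\Robject{sample} (identifying the biological sample) and \Robject{run}
-(identifying the sequencing run):
-
-<<collapseReps, eval=FALSE>>=
-ddsColl <- collapseReplicates(dds, groupby=dds$sample, run=dds$run)
-@
-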
-\subsubsection{About the pasilla dataset}
-
-We continue with the \Biocexptpkg{pasilla} data constructed from the
-count matrix method above. This data set is from an experiment on
-\emph{Drosophila melanogaster} cell cultures that investigated the
-effect of RNAi knock-down of the splicing factor \emph{pasilla}
-\cite{Brooks2010}.  The detailed transcript of the production of
-the \Biocexptpkg{pasilla} data is provided in the vignette of the 
-data package \Biocexptpkg{pasilla}.
-
-\subsection{Differential expression analysis} \label{sec:de}
-
-The standard differential expression analysis steps are wrapped
-into a single function, \Rfunction{DESeq}. The estimation steps performed
-by this function are described in Section~\ref{sec:glm}, in the manual page for
-\Robject{?DESeq} and in the Methods section of the \deseqtwo{} publication \cite{Love2014}. 
-The individual sub-functions which are called by \Rfunction{DESeq}
-are still available, described in Section~\ref{sec:steps}. 
-
-Results tables are generated using the function \Rfunction{results}, which
-extracts a results table with log2 fold changes, $p$ values and adjusted
-$p$ values. With no arguments to \Rfunction{results}, the results will be for
-the last variable in the design formula, and if this is a factor, 
-the comparison will be the last level of this variable over the first level. 
-Details about the comparison are printed to the console. The text, \texttt{condition}
-\texttt{treated vs untreated}, tells you that the estimates are of the logarithmic
-fold change $\log_2 ( \textrm{treated} / \textrm{untreated} )$.
-
-<<deseq>>=
-dds <- DESeq(dds)
-res <- results(dds)
-res
-@ 
-
-These steps should take less than 30 seconds for most analyses. For
-experiments with many samples (e.g. 100 samples), one can take
-advantage of parallelized computation.  Both of the above functions
-have an argument \Robject{parallel} which if set to \Robject{TRUE} can
-be used to distribute computation across cores specified by the
-\Rfunction{register} function of \Biocpkg{BiocParallel}. For example,
-the following chunk (not evaluated here), would register 4 cores, and
-then the two functions above, with \Robject{parallel=TRUE}, would
-split computation over these cores. 
-
-<<parallel, eval=FALSE>>=
-library("BiocParallel")
-register(MulticoreParam(4))
-@
-
-We can order our results table by the smallest adjusted $p$ value:
-
-<<resOrder>>=
-resOrdered <- res[order(res$padj),]
-@
-
-We can summarize some basic tallies using the
-\Rfunction{summary} function.
-
-<<sumRes>>=
-summary(res)
-@ 
-
-How many adjusted p-values were less than 0.1?
-
-<<sumRes01>>=
-sum(res$padj < 0.1, na.rm=TRUE)
-@ 
-
-The \Rfunction{results} function contains a number of arguments to
-customize the results table which is generated.  Note that the
-\Rfunction{results} function automatically performs independent
-filtering based on the mean of normalized counts for each gene,
-optimizing the number of genes which will have an adjusted $p$ value
-below a given FDR cutoff, \Robject{alpha}.
-Independent filtering is further discussed in Section~\ref{sec:autoFilt}.
-By default the argument
-\Robject{alpha} is set to $0.1$.  If the adjusted $p$ value cutoff
-will be a value other than $0.1$, \Robject{alpha} should be set to
-that value:
-
-<<resAlpha05>>=
-res05 <- results(dds, alpha=0.05)
-summary(res05)
-sum(res05$padj < 0.05, na.rm=TRUE)
-@ 
-
-A generalization of the idea of $p$ value filtering is to \textit{weight} hypotheses
-to optimize power. A new Bioconductor package, \Biocpkg{IHW}, is now available
-that implements the method of \textit{Independent Hypothesis Weighting} \cite{Ignatiadis2015}.
-Here we show the use of \textit{IHW} for $p$ value adjustment of \deseqtwo{} results.
-For more details, please see the vignette of the \Biocpkg{IHW} package.
-Note that the \textit{IHW} result object is stored in the metadata.
-
-<<IHW>>=
-library("IHW")
-resIHW <- results(dds, filterFun=ihw)
-summary(resIHW)
-sum(resIHW$padj < 0.1, na.rm=TRUE)
-metadata(resIHW)$ihwResult
-@ 
-
-If a multi-factor design is used, or if the variable in the design
-formula has more than two levels, the \Robject{contrast} argument of
-\Rfunction{results} can be used to extract different comparisons from
-the \Rclass{DESeqDataSet} returned by \Rfunction{DESeq}.
-Multi-factor designs are discussed further in Section~\ref{sec:multifactor},
-and the use of the \Robject{contrast} argument is discussed in Section~\ref{sec:contrasts}.
-
-For advanced users, note that all the values calculated by the \deseqtwo{} 
-package are stored in the \Rclass{DESeqDataSet} object, and access 
-to these values is discussed in Section~\ref{sec:access}.
-
-\subsection{Exploring and exporting results}
-
-\subsubsection{MA-plot}
-
-\begin{figure}[tb]
-\includegraphics[width=.49\textwidth]{figure/MANoPrior-1}
-\includegraphics[width=.49\textwidth]{figure/MA-1}
-\caption{
-  MA-plot.
-  These plots show the log2 fold changes from the treatment over
-  the mean of normalized counts, i.e. the average of counts normalized by
-  size factors. The left plot shows the ``unshrunken'' log2 fold changes, 
-  while the right plot, produced by the code above, shows the shrinkage 
-  of log2 fold changes resulting from the incorporation of a zero-centered
-  normal prior. The shrinkage is greater for the log2 fold change
-  estimates from genes with low counts and high dispersion, 
-  as can be seen by the narrowing of spread of leftmost points 
-  in the right plot.}
-\label{fig:MA}
-\end{figure}
-
-In \deseqtwo{}, the function \Rfunction{plotMA} shows the log2
-fold changes attributable to a given variable over the mean of normalized counts.
-Points will be colored red if the adjusted $p$ value is less than 0.1.  
-Points which fall out of the window are plotted as open triangles pointing 
-either up or down.
-
-<<MA, fig.width=4.5, fig.height=4.5>>=
-plotMA(res, main="DESeq2", ylim=c(-2,2))
-@
-
-After calling \Rfunction{plotMA}, one can use the function
-\Rfunction{identify} to interactively detect the row number of
-individual genes by clicking on the plot. One can then recover
-the gene identifiers by saving the resulting indices:
-
-<<MAidentify, eval=FALSE>>=
-idx <- identify(res$baseMean, res$log2FoldChange)
-rownames(res)[idx]
-@ 
-
-The MA-plot of log2 fold changes returned by \deseqtwo{} allows us to
-see how the shrinkage of fold changes works for genes with low
-counts. You can still obtain results tables which include the
-``unshrunken'' log2 fold changes (for a simple comparison, the ratio
-of the mean normalized counts in the two groups). A column
-\Robject{lfcMLE} with the unshrunken maximum likelihood estimate (MLE)
-for the log2 fold change will be added with an additional argument to
-\Rfunction{results}:
-
-<<resMLE>>=
-resMLE <- results(dds, addMLE=TRUE)
-head(resMLE, 4)
-@ 
-
-One can make an MA-plot of the unshrunken estimates like so:
-
-<<MANoPrior, fig.width=4.5, fig.height=4.5>>=
-plotMA(resMLE, MLE=TRUE, main="unshrunken LFC", ylim=c(-2,2))
-@
-
-\subsubsection{Plot counts} \label{sec:plotcounts}
-
-It can also be useful to examine the counts of reads for a single gene
-across the groups. A simple function for making this
-plot is \Rfunction{plotCounts}, which normalizes counts by sequencing depth
-and adds a pseudocount of $\frac{1}{2}$ to allow for log scale plotting.
-The counts are grouped by the variables in \Robject{intgroup}, where
-more than one variable can be specified. Here we specify the gene
-which had the smallest $p$ value from the results table created
-above. You can select the gene to plot by rowname or by numeric index.
-
-<<plotCounts, dev="pdf", fig.width=4.5, fig.height=5>>=
-plotCounts(dds, gene=which.min(res$padj), intgroup="condition")
-@ 
-
-For customized plotting, an argument \Robject{returnData} specifies
-that the function should only return a \Rclass{data.frame} for
-plotting with \Rfunction{ggplot}.
-
-<<plotCountsAdv, dev="pdf", fig.width=3.5, fig.height=3.5>>=
-d <- plotCounts(dds, gene=which.min(res$padj), intgroup="condition", 
-                returnData=TRUE)
-library("ggplot2")
-ggplot(d, aes(x=condition, y=count)) + 
-  geom_point(position=position_jitter(w=0.1,h=0)) + 
-  scale_y_log10(breaks=c(25,100,400))
-@ 
-
-\begin{figure}
-\includegraphics[width=.49\textwidth]{figure/plotCounts-1}
-\includegraphics[width=.49\textwidth]{figure/plotCountsAdv-1}
-\caption{
-  Plot of counts for one gene.
-  The plot of normalized counts (plus a pseudocount of $\frac{1}{2}$)
-  either made using the \Rfunction{plotCounts} function (left)
-  or using another plotting library (right, using \CRANpkg{ggplot2}).}
-\label{fig:plotcounts}
-\end{figure}
-
-\subsubsection{More information on results columns} \label{sec:moreInfo}
-
-Information about which variables and tests were used can be found by calling
-the function \Rfunction{mcols} on the results object.
-
-<<metadata>>=
-mcols(res)$description
-@
-
-For a particular gene, a log2 fold change of $-1$ for
-\Robject{condition treated vs untreated} means that the treatment
-induces a multiplicative change in observed gene expression level of
-$2^{-1} = 0.5$ compared to the untreated condition. If the variable of
-interest is continuous-valued, then the reported log2 fold change is
-per unit of change of that variable.
-
-\textbf{Note on p-values set to NA}: some values in the results table
-can be set to \Robject{NA} for one of the following reasons:
-
-\begin{enumerate} 
-  \item If within a row, all samples have zero counts, 
-    the \Robject{baseMean} column will be zero, and the
-    log2 fold change estimates, $p$ value and adjusted $p$ value
-    will all be set to \texttt{NA}.
-  \item If a row contains a sample with an extreme count outlier
-    then the $p$ value and adjusted $p$ value will be set to \texttt{NA}.
-    These outlier counts are detected by Cook's distance. Customization
-    of this outlier filtering and description of functionality for 
-    replacement of outlier counts and refitting is described in 
-    Section~\ref{sec:outlierApproach}.
-  \item If a row is filtered by automatic independent filtering, 
-    for having a low mean normalized count, then only the adjusted $p$
-    value will be set to \texttt{NA}. 
-    Description and customization of independent filtering is 
-    described in Section~\ref{sec:autoFilt}.
-\end{enumerate}
-
-\subsubsection{Rich visualization and reporting of results}
-
-\textbf{ReportingTools.} An HTML report of the results with plots and sortable/filterable columns
-can be generated using the \Biocpkg{ReportingTools} package
-on a \Rclass{DESeqDataSet} that has been processed by the \Rfunction{DESeq} function.
-For a code example, see the ``RNA-seq differential expression'' vignette at
-the \Biocpkg{ReportingTools} page, or the manual page for the 
-\Rfunction{publish} method for the \Rclass{DESeqDataSet} class.
-
-\textbf{regionReport.} An HTML and PDF summary of the results with plots
-can also be generated using the \Biocpkg{regionReport} package.
-The \Rfunction{DESeq2Report} function should be run on a 
-\Rclass{DESeqDataSet} that has been processed by the \Rfunction{DESeq} function.
-For more details see the manual page for \Rfunction{DESeq2Report} 
-and an example vignette in the \Biocpkg{regionReport} package.
-
-\textbf{Glimma.} Interactive visualization of \deseqtwo{} output, 
-including MA-plots (also called MD-plot) can be generated using the
-\Biocpkg{Glimma} package. See the manual page for \Rfunction{glMDPlot.DESeqResults}.
-
-\textbf{pcaExplorer.} Interactive visualization of \deseqtwo{} output,
-including PCA plots, boxplots of counts and other useful summaries can be
-generated using the \Biocpkg{pcaExplorer} package.
-See the ``Launching the application'' section of the package vignette.
-
-\subsubsection{Exporting results to CSV files}
-
-A plain-text file of the results can be exported using the 
-base \R{} functions \Rfunction{write.csv} or \Rfunction{write.table}. 
-We suggest using a descriptive file name indicating the variable
-and levels which were tested.
-
-<<export, eval=FALSE>>=
-write.csv(as.data.frame(resOrdered), 
-          file="condition_treated_results.csv")
-@
-
-Exporting only the results which pass an adjusted $p$ value
-threshold can be accomplished with the \Rfunction{subset} function,
-followed by the \Rfunction{write.csv} function.
-
-<<subset>>=
-resSig <- subset(resOrdered, padj < 0.1)
-resSig
-@ 
-
-\subsection{Multi-factor designs} \label{sec:multifactor}
-
-Experiments with more than one factor influencing the counts can be
-analyzed using design formulas that include the additional variables.
-By adding these to the design, one can control for additional variation
-in the counts. For example, if the condition samples are balanced
-across experimental batches, by including the \Robject{batch} factor in the
-design, one can increase the sensitivity for finding differences due
-to \Robject{condition}. There are multiple ways to analyze experiments when the
-additional variables are of interest and not just controlling factors 
-(see Section \ref{sec:interactions} on interactions).
-
-The data in the \Biocexptpkg{pasilla} package have a condition of interest 
-(the column \Robject{condition}), as well as information on the type of sequencing 
-which was performed (the column \Robject{type}), as we can see below:
-
-<<multifactor>>=
-colData(dds)
-@
-
-We create a copy of the \Rclass{DESeqDataSet}, so that we can rerun
-the analysis using a multi-factor design.
-
-<<copyMultifactor>>=
-ddsMF <- dds
-@
-
-We can account for the different types of sequencing, and get a clearer picture
-of the differences attributable to the treatment.  As \Robject{condition} is the
-variable of interest, we put it at the end of the formula. Thus the \Rfunction{results}
-function will by default pull the \Robject{condition} results unless 
-\Robject{contrast} or \Robject{name} arguments are specified. 
-Then we can re-run \Rfunction{DESeq}:
-
-<<replaceDesign>>=
-design(ddsMF) <- formula(~ type + condition)
-ddsMF <- DESeq(ddsMF)
-@
-
-Again, we access the results using the \Rfunction{results} function.
-
-<<multiResults>>=
-resMF <- results(ddsMF)
-head(resMF)
-@
-
-It is also possible to retrieve the log2 fold changes, $p$ values and adjusted
-$p$ values of the \Robject{type} variable. The \Robject{contrast} argument of 
-the function \Rfunction{results} takes a character vector of length three:
-the name of the variable, the name of the factor level for the numerator
-of the log2 ratio, and the name of the factor level for the denominator.
-The \Robject{contrast} argument can also take other forms, as
-described in the help page for \Rfunction{results} and in Section~\ref{sec:contrasts}.
-
-<<multiTypeResults>>=
-resMFType <- results(ddsMF,
-                     contrast=c("type", "single-read", "paired-end"))
-head(resMFType)
-@
-
-If the variable is continuous or an interaction term (see Section~\ref{sec:interactions})
-then the results can be extracted using the \Robject{name} argument to \Rfunction{results},
-where the name is one of the elements returned by \Robject{resultsNames(dds)}.
-
-\newpage
-
-%---------------------------------------------------
-\section{Data transformations and visualization} \label{sec:transf}
-%---------------------------------------------------
-\subsection{Count data transformations}
-%---------------------------------------------------
-
-In order to test for differential expression, we operate on raw counts
-and use discrete distributions as described in the previous Section~\ref{sec:de}.
-However for other downstream analyses -- 
-e.g. for visualization or clustering -- it might be useful 
-to work with transformed versions of the count data. 
-
-Maybe the most obvious choice of transformation is the logarithm.
-Since count values for a gene can be zero in some
-conditions (and non-zero in others), some advocate the use of
-\emph{pseudocounts}, i.\,e.\ transformations of the form
-%
-\begin{equation}\label{eq:shiftedlog}
-  y = \log_2(n + 1)\quad\mbox{or more generally,}\quad y = \log_2(n + n_0),
-\end{equation}
-%
-where $n$ represents the count values and $n_0$ is a positive constant.
-
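-For example, the shifted logarithm with $n_0 = 1$, applied to counts
-normalized by size factor, can be computed as:
-
-<<shiftedLogSketch, eval=FALSE>>=
-logCounts <- log2(counts(dds, normalized=TRUE) + 1)
-@
-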
-In this section, we discuss two alternative
-approaches that offer more theoretical justification and a rational way
-of choosing the parameter equivalent to $n_0$ above.
-The \emph{regularized logarithm} or \emph{rlog} incorporates a prior on
-the sample differences \cite{Love2014}, 
-and the other uses the concept of variance stabilizing
-transformations (VST) \cite{Tibshirani1988,sagmb2003,Anders:2010:GB}.
-Both transformations produce transformed data on the $\log_2$ scale
-which has been normalized with respect to library size.
-
-The point of these two transformations, the \emph{rlog} and the VST,
-is to remove the dependence of the variance on the mean,
-particularly the high variance of the logarithm of count data when the
-mean is low. Both \emph{rlog} and VST use the experiment-wide trend
-of variance over mean, in order to transform the data to remove the
-experiment-wide trend. Note that we do not require or
-desire that all the genes have \emph{exactly} the same variance after
-transformation. Indeed, in Figure~\ref{fig:meansd} below, you will see
-that after the transformations the genes with the same mean do not
-have exactly the same standard deviations, but that the
-experiment-wide trend has flattened. It is those genes with row
-variance above the trend which will allow us to cluster samples into
-interesting groups.
-
-\textbf{Note on running time:} if you have many samples (e.g. 100s),
-the \Rfunction{rlog} function might take too long, and the 
-\Rfunction{varianceStabilizingTransformation} is a faster choice.  
-The rlog and VST have similar properties, but the rlog requires fitting a shrinkage
-term for each sample and each gene which takes time.  See the
-\deseqtwo{} paper for more discussion on the differences
-\cite{Love2014}. In addition, a new function \Rfunction{vst} provides
-an even faster version of the \Rfunction{varianceStabilizingTransformation}
-by calculating the global dispersion trend on a subset of the genes
-(default 1000). \Rfunction{vst} may be attractive for interactive EDA.
-
-\subsubsection{Blind dispersion estimation}
-
-The two functions, \Rfunction{rlog} and
-\Rfunction{varianceStabilizingTransformation}, have an argument
-\Robject{blind}, for whether the transformation should be blind to the
-sample information specified by the design formula. When
-\Robject{blind} equals \Robject{TRUE} (the default), the functions
-will re-estimate the dispersions using only an intercept (design
-formula $\sim 1$). This setting should be used in order to compare
-samples in a manner wholly unbiased by the information about
-experimental groups, for example to perform sample QA (quality
-assurance) as demonstrated below.
-
-However, blind dispersion estimation is not the appropriate choice if
-one expects that many or the majority of genes (rows) will have large
-differences in counts which are explainable by the experimental design,
-and one wishes to transform the data for downstream analysis. In this
-case, using blind dispersion estimation will lead to large estimates
-of dispersion, as it attributes differences due to the experimental design
-to unwanted ``noise'', and will result in overly shrinking the transformed
-values towards each other. 
-By setting \Robject{blind} to \Robject{FALSE}, the dispersions
-already estimated will be used to perform transformations, or if not
-present, they will be estimated using the current design formula. Note
-that only the fitted dispersion estimates from the mean-dispersion trend
-line are used in the transformation (the global dependence of
-dispersion on mean for the entire experiment).
-So even with \Robject{blind} set to \Robject{FALSE}, the transformation
-for the most part does not use the information about which samples
-belong to which experimental group.
-
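-For example, the two settings can be compared as follows:
-
-<<blindOrNot, eval=FALSE>>=
-vsdBlind  <- varianceStabilizingTransformation(dds)              # blind=TRUE
-vsdDesign <- varianceStabilizingTransformation(dds, blind=FALSE)
-@
-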
-\subsubsection{Extracting transformed values}
-
-These functions return an object of class \Rclass{DESeqTransform}
-which is a subclass of \Rclass{RangedSummarizedExperiment}. 
-For $\sim 20$ samples, running on a newly created \Robject{DESeqDataSet},
-\Rfunction{rlog} may take 30 seconds, 
-\Rfunction{varianceStabilizingTransformation} may take 5 seconds, and
-\Rfunction{vst} less than 1 second (by subsetting to 1000 genes for
-calculating the global dispersion trend).
-However, the running times are shorter and more similar with \Rcode{blind=FALSE} and
-if the function \Rfunction{DESeq} has already been run, because then
-it is not necessary to re-estimate the dispersion values.
-The \Rfunction{assay} function is used to extract the matrix of normalized values.
-
-<<rlogAndVST>>=
-rld <- rlog(dds, blind=FALSE)
-vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
-vsd.fast <- vst(dds, blind=FALSE)
-head(assay(rld), 3)
-@
-
-\subsubsection{Regularized log transformation}
-
-The function \Rfunction{rlog} stands for \emph{regularized log}; it
-transforms the original count data to the log2 scale by fitting a
-model with a term for each sample and a prior distribution on the
-coefficients which is estimated from the data. This is the same kind
-of shrinkage (sometimes referred to as regularization, or moderation)
-of log fold changes used by the \Rfunction{DESeq} and
-\Rfunction{nbinomWaldTest}, as seen in Figure \ref{fig:MA}. The
-resulting data contains elements defined as:
-
-$$ \log_2(q_{ij}) = \beta_{i0} + \beta_{ij} $$
-
-where $q_{ij}$ is a parameter proportional to the expected true
-concentration of fragments for gene $i$ and sample $j$ (see
-Section~\ref{sec:glm}), $\beta_{i0}$ is an intercept which does not
-undergo shrinkage, and $\beta_{ij}$ is the sample-specific effect
-which is shrunk toward zero based on the dispersion-mean trend over
-the entire dataset. The trend typically captures high dispersions for
-low counts, and therefore these genes exhibit higher shrinkage from
-the \Rfunction{rlog}.
-
-Note that, as $q_{ij}$ represents the part of the mean value
-$\mu_{ij}$ after the size factor $s_j$ has been divided out, it is
-clear that the rlog transformation inherently accounts for differences
-in sequencing depth.  Without priors, this design matrix would lead to
-a non-unique solution; however, the addition of a prior on
-non-intercept betas allows for a unique solution to be found.  The
-regularized log transformation is preferable to the variance
-stabilizing transformation if the size factors vary widely.
-
-\subsubsection{Variance stabilizing transformation}
-
-Above, we used a parametric fit for the dispersion. In this case, the
-closed-form expression for the variance stabilizing transformation is
-used by \Rfunction{varianceStabilizingTransformation}, which is
-derived in the file \texttt{vst.pdf}, that is distributed in the
-package alongside this vignette. If a local fit is used (option
-\Robject{fitType="locfit"} to \Rfunction{estimateDispersions}) a
-numerical integration is used instead.
-
-<<vsd1, echo=FALSE, fig.width=4.5, fig.height=4.5, fig.show="asis", fig.small=TRUE, fig.pos="!bt", fig.cap="VST and log2. Graphs of the variance stabilizing transformation for sample 1, in blue, and of the transformation $f(n) = \\log_2(n/s_1)$, in black. $n$ are the counts and $s_1$ is the size factor for the first sample.\\label{figure/vsd1-1}">>=
-px     <- counts(dds)[,1] / sizeFactors(dds)[1]
-ord    <- order(px)
-ord    <- ord[px[ord] < 150]
-ord    <- ord[seq(1, length(ord), length=50)]
-last   <- ord[length(ord)]
-vstcol <- c("blue", "black")
-matplot(px[ord],
-        cbind(assay(vsd)[, 1], log2(px))[ord, ],
-        type="l", lty=1, col=vstcol, xlab="n", ylab="f(n)")
-legend("bottomright",
-       legend = c(
-        expression("variance stabilizing transformation"),
-        expression(log[2](n/s[1]))),
-       fill=vstcol)
-@
-
-The resulting variance stabilizing transformation is shown in Figure
-\ref{figure/vsd1-1}.  The code that produces the figure is hidden from
-this vignette for the sake of brevity, but can be seen in the
-\texttt{.Rnw} or \texttt{.R} source file. Note that the vertical axis
-in such plots is the square root of the variance over all samples, thus
-including the variance due to the experimental conditions.  While a
-flat curve of the square root of variance over the mean may seem like
-the goal of such transformations, this may be unreasonable in the case
-of datasets with many true differences due to the experimental
-conditions.
-
-\subsubsection{Effects of transformations on the variance}
-
-Figure~\ref{fig:meansd} plots the standard deviation of the transformed
-data, across samples, against the mean, using the shifted
-logarithm transformation \eqref{eq:shiftedlog}, the
-regularized log transformation and the variance stabilizing transformation.
-The shifted logarithm has elevated standard deviation in the lower
-count range, and the regularized log to a lesser extent, while for
-the variance stabilized data the standard deviation is roughly constant
-along the whole dynamic range.
-
-<<meansd, fig.width=4, fig.height=3, fig.show="asis", fig.wide=TRUE, fig.pos="tb", out.width=".32\\linewidth", fig.cap="Per-gene standard deviation (taken across samples), against the rank of the mean. {\\bfhelvet(a)} for the shifted logarithm $\\log_2(n+1)$, the regularized log transformation {\\bfhelvet(b)} and the variance stabilizing transformation {\\bfhelvet(c)}.\\label{fig:meansd}", fig.subcap="">>=
-library("vsn")
-notAllZero <- (rowSums(counts(dds))>0)
-meanSdPlot(log2(counts(dds,normalized=TRUE)[notAllZero,] + 1))
-meanSdPlot(assay(rld[notAllZero,]))
-meanSdPlot(assay(vsd[notAllZero,]))
-@
-
-%---------------------------------------------------------------
-\subsection{Data quality assessment by sample clustering and visualization}\label{sec:quality}
-%---------------------------------------------------------------
-
-Data quality assessment and quality control (i.\,e.\ the removal of
-insufficiently good data) are essential steps of any data
-analysis. These steps should typically be performed 
-very early in the analysis of a new data set,
-preceding or in parallel to the differential expression testing.
-
-We define the term \emph{quality} as 
-\emph{fitness for purpose}\footnote{\url{http://en.wikipedia.org/wiki/Quality_\%28business\%29}}.
-Our purpose is the detection of differentially expressed genes, and we
-are looking in particular for samples whose experimental treatment
-suffered from an abnormality that renders the data points obtained from
-these particular samples detrimental to our purpose.
-
-\subsubsection{Heatmap of the count matrix}\label{sec:hmc}
-To explore a count matrix, it is often instructive to look at it as a
-heatmap.  Below we show how to produce such a heatmap 
-for various transformations of the data.
-
-<<heatmap, dev="pdf", fig.width=5, fig.height=7>>=
-library("pheatmap")
-select <- order(rowMeans(counts(dds,normalized=TRUE)),
-                decreasing=TRUE)[1:20]
-
-nt <- normTransform(dds) # defaults to log2(x+1)
-log2.norm.counts <- assay(nt)[select,]
-df <- as.data.frame(colData(dds)[,c("condition","type")])
-pheatmap(log2.norm.counts, cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-
-pheatmap(assay(rld)[select,], cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-
-pheatmap(assay(vsd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
-         cluster_cols=FALSE, annotation_col=df)
-@
-
-\begin{figure*}
-\includegraphics[width=.32\textwidth]{figure/heatmap-1}
-\includegraphics[width=.32\textwidth]{figure/heatmap-2}
-\includegraphics[width=.32\textwidth]{figure/heatmap-3}
-\caption{Heatmaps showing the expression data of the \Sexpr{length(select)}
-  most highly expressed genes. The data shown are log2 normalized counts (left),
-  from the regularized log transformation (center) and from the variance
-  stabilizing transformation (right).}
-\label{fig:heatmap2}
-\end{figure*}
-
-\subsubsection{Heatmap of the sample-to-sample distances}\label{sec:dists}
-
-Another use of the transformed data is sample clustering. Here, we apply the
-\Rfunction{dist} function to the transpose of the transformed count matrix to get
-sample-to-sample distances. We could alternatively use the variance
-stabilizing transformation here.
-
-<<sampleClust>>=
-sampleDists <- dist(t(assay(rld)))
-@
-
-A heatmap of this distance matrix gives us an overview over similarities
-and dissimilarities between samples (Figure \ref{figure/figHeatmapSamples-1}):
-We have to provide the sample distances to the clustering distance
-arguments of the heatmap function, or else the heatmap
-function would calculate a clustering based on the distances between
-the rows/columns of the distance matrix.
-
-<<figHeatmapSamples, dev="pdf", fig.width=7, fig.height=7, fig.show="asis", fig.small=TRUE, fig.pos="tb", fig.cap="Sample-to-sample distances.  Heatmap showing the Euclidean distances between the samples as calculated from the regularized log transformation.\\label{figure/figHeatmapSamples-1}">>=
-library("RColorBrewer")
-sampleDistMatrix <- as.matrix(sampleDists)
-rownames(sampleDistMatrix) <- paste(rld$condition, rld$type, sep="-")
-colnames(sampleDistMatrix) <- NULL
-colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
-pheatmap(sampleDistMatrix,
-         clustering_distance_rows=sampleDists,
-         clustering_distance_cols=sampleDists,
-         col=colors)
-@
-
-\subsubsection{Principal component plot of the samples}\label{sec:pca}
-
-Related to the distance matrix of Section~\ref{sec:dists} is the PCA
-plot of the samples, which we obtain as follows (Figure \ref{figure/figPCA-1}).
-
-<<figPCA, dev="pdf", fig.width=5, fig.height=3>>=
-plotPCA(rld, intgroup=c("condition", "type"))
-@
-
-\incfig[tbh]{figure/figPCA-1}{\textwidth}{PCA plot.}{
-  PCA plot. The \Sexpr{ncol(rld)} samples shown in the 2D
-  plane spanned by their first two principal components. This type of
-  plot is useful for visualizing the overall effect of experimental
-  covariates and batch effects.
-}
-
-It is also possible to customize the PCA plot using the
-\Rfunction{ggplot} function.
-
-<<figPCA2, dev="pdf", fig.width=5, fig.height=3>>=
-data <- plotPCA(rld, intgroup=c("condition", "type"), returnData=TRUE)
-percentVar <- round(100 * attr(data, "percentVar"))
-ggplot(data, aes(PC1, PC2, color=condition, shape=type)) +
-  geom_point(size=3) +
-  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
-  ylab(paste0("PC2: ",percentVar[2],"% variance")) + 
-  coord_fixed()
-@
-
-\incfig[tbh]{figure/figPCA2-1}{\textwidth}{PCA plot.}{
-  PCA plot customized using the \CRANpkg{ggplot2} library.
-}
-
-
-\newpage
-
-%--------------------------------------------------
-\section{Variations to the standard workflow}
-%--------------------------------------------------
-
-\subsection{Wald test individual steps} \label{sec:steps}
-
-The function \Rfunction{DESeq} runs the following functions in order:
-
-<<WaldTest, eval=FALSE>>=
-dds <- estimateSizeFactors(dds)
-dds <- estimateDispersions(dds)
-dds <- nbinomWaldTest(dds)
-@
-
-\subsection{Contrasts} \label{sec:contrasts}
-
-A contrast is a linear combination of estimated log2 fold changes,
-which can be used to test if differences between groups are equal to
-zero.  The simplest use case for contrasts is an experimental design
-containing a factor with three levels, say A, B and C.  Contrasts
-enable the user to generate results for all 3 possible differences:
-log2 fold change of B vs A, of C vs A, and of C vs B.
-The \Robject{contrast} argument of the \Rfunction{results} function is
-used to extract test results of log2 fold changes of interest, for example:
-
-<<simpleContrast, eval=FALSE>>=
-results(dds, contrast=c("condition","C","B"))
-@ 
-
-Log2 fold changes can also be added and subtracted by providing a
-\Robject{list} to the \Robject{contrast} argument which has two elements:
-the names of the log2 fold changes to add, and the names of the log2
-fold changes to subtract. The names used in the list should come from
-\Robject{resultsNames(dds)}.
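-
-For example, assuming for illustration that \Robject{resultsNames(dds)}
-contains the coefficients \Robject{condition\_B\_vs\_A} and
-\Robject{condition\_C\_vs\_A}, a minimal sketch of such a list contrast
-(here testing C vs B) would be:
-
-<<listContrast, eval=FALSE>>=
-# hypothetical names; check resultsNames(dds) for the actual coefficients
-results(dds, contrast=list("condition_C_vs_A", "condition_B_vs_A"))
-@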
-
-Alternatively, a numeric vector of the
-length of \Robject{resultsNames(dds)} can be provided, for manually
-specifying the linear combination of terms.  Demonstrations of the use
-of contrasts for various designs can be found in the examples section
-of the help page for the \Rfunction{results} function. The
-mathematical formula that is used to generate the contrasts can be found in
-Section~\ref{sec:ctrstTheory}.
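-
-As a sketch, again assuming \Robject{resultsNames(dds)} returns the
-three coefficients \Robject{Intercept}, \Robject{condition\_B\_vs\_A}
-and \Robject{condition\_C\_vs\_A}, the numeric contrast testing C vs B
-would be:
-
-<<numericContrast, eval=FALSE>>=
-# numeric contrast: (C vs A) minus (B vs A) equals C vs B
-results(dds, contrast=c(0, -1, 1))
-@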
-
-\subsection{Interactions} \label{sec:interactions}
-
-Interaction terms can be added to the design formula, in order to
-test, for example, if the log2 fold change attributable to a given
-condition is \textit{different} based on another factor, for example if the
-condition effect differs across genotype.
-
-Many users begin to add interaction terms to the design formula, when
-in fact a much simpler approach would give all the results tables that
-are desired. We will explain this approach first, because it is much
-simpler to perform.
-If the comparisons of interest are, for example, the effect
-of a condition for different sets of samples, a simpler approach than
-adding interaction terms explicitly to the design formula is to
-perform the following steps:
-
-\begin{enumerate}
-\item combine the factors of interest into a single factor with all
-  combinations of the original factors 
-\item change the design to include just this factor, e.g. \Robject{\lowtilde{} group}
-\end{enumerate}
-
-Using this design is similar to adding an interaction term, 
-in that it models multiple condition effects which
-can be easily extracted with \Rfunction{results}.
-Suppose we have two factors \Robject{genotype} (with values I, II, and III) 
-and \Robject{condition} (with values A and B), and we want to extract 
-the condition effect specifically for each genotype. We could use the
-following approach to obtain, e.g. the condition effect for genotype I: 
-
-<<combineFactors, eval=FALSE>>=
-dds$group <- factor(paste0(dds$genotype, dds$condition))
-design(dds) <- ~ group
-dds <- DESeq(dds)
-resultsNames(dds)
-results(dds, contrast=c("group", "IB", "IA"))
-@
-
-<<interFig, dev="pdf", fig.width=4, fig.height=3, echo=FALSE, results="hide">>=
-npg <- 20
-mu <- 2^c(8,10,9,11,10,12)
-cond <- rep(rep(c("A","B"),each=npg),3)
-geno <- rep(c("I","II","III"),each=2*npg)
-table(cond, geno)
-counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
-d <- data.frame(log2c=log2(counts+1), cond, geno)
-library(ggplot2)
-plotit <- function(d, title) {
-  ggplot(d, aes(x=cond, y=log2c, group=geno)) + 
-    geom_jitter(size=1.5, position = position_jitter(width=.15)) +
-    facet_wrap(~ geno) + 
-    stat_summary(fun.y=mean, geom="line", colour="red", size=0.8) + 
-    xlab("condition") + ylab("log2(counts+1)") + ggtitle(title)
-}
-plotit(d, "Gene 1") + ylim(7,13)
-lm(log2c ~ cond + geno + geno:cond, data=d)
-@ 
-
-<<interFig2, dev="pdf", fig.width=4, fig.height=3,  echo=FALSE, results="hide">>=
-mu[4] <- 2^12
-mu[6] <- 2^8
-counts <- rnbinom(6*npg, mu=rep(mu,each=npg), size=1/.01)
-d2 <- data.frame(log2c=log2(counts + 1), cond, geno)
-plotit(d2, "Gene 2") + ylim(7,13)
-lm(log2c ~ cond + geno + geno:cond, data=d2)
-@ 
-
-\begin{figure*}
-\includegraphics[width=.49\textwidth]{figure/interFig-1}
-\includegraphics[width=.49\textwidth]{figure/interFig2-1}
-\caption{
-  Genotype-specific condition effects.
-  Here, the y-axis represents $\log_2(\textrm{counts}+1)$, and each
-  group has 20 samples (black dots). A red line connects the mean of
-  the groups within each genotype.
-  On the left side (Gene 1), note that the condition effect is consistent
-  across genotypes. Although condition A has a different baseline for
-  I, II, and III, the condition effect is a log2 fold change of about 2
-  for each genotype.
-  Using a model with an interaction term \Robject{genotype:condition},
-  the interaction terms for genotype II and genotype III will be nearly 0.
-  On the right side (Gene 2), we can see that the condition effect is
-  not consistent across genotype. Here the main condition effect (the
-  effect for the reference genotype I) is again 2. However, this time
-  the interaction terms will be around 1 for genotype II and
-  -4 for genotype III. This is 
-  because the condition effect is higher by 1 for genotype II compared to
-  genotype I, and lower by 4 for genotype III compared to genotype I.
-  The condition effect for genotype II (or III) is obtained by adding the
-  main condition effect and the interaction term for that genotype.
-  Such a plot can be made using the \Rfunction{plotCounts} function
-  (Section~\ref{sec:plotcounts}).
-}
-\label{fig:inter}
-\end{figure*}
-
-Now we will continue to explain the use of interactions in order to
-test for \textit{differences} in condition effects. We continue with
-the example of condition effects across three genotypes (I, II, and III).
-For a diagram of how interactions might look across genotypes 
-please refer to Figure \ref{fig:inter}. 
-
-The key point to remember about designs with interaction terms is
-that, unlike for a design \Robject{\lowtilde{} 
-  genotype + condition}, where the condition effect represents the
-\textit{overall} effect controlling for differences due to genotype, by adding
-\Robject{genotype:condition}, the main condition effect only
-represents the effect of condition for the \textit{reference level} of
-genotype (I, or whichever level was defined by the user as the
-reference level). The interaction terms \Robject{genotypeII.conditionB}
-and \Robject{genotypeIII.conditionB} give the \textit{difference}
-between the condition effect for a given genotype and the condition
-effect for the reference genotype. 
-
-This genotype-condition interaction example is examined in further
-detail in Example 3 in the help page for \Rfunction{results}, which
-can be found by typing \Rcode{?results}. In particular, we show how to
-test for differences in the condition effect across genotype, and we
-show how to obtain the condition effect for non-reference genotypes.
-Note that in \deseqtwo{} version 1.10, the \Rfunction{DESeq} function will turn
-off log fold change shrinkage (setting \Robject{betaPrior=FALSE})
-for designs which contain an interaction term. Turning off the log
-fold change shrinkage allows the software to use standard model
-matrices (as would be produced by \Rfunction{model.matrix}), where the
-interaction coefficients are easier to interpret.
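-
-As a sketch of the calls described in that example (the coefficient
-names below are assumptions; consult \Robject{resultsNames(dds)} for
-the actual names in your model):
-
-<<interactionSketch, eval=FALSE>>=
-# condition effect for the reference genotype (I):
-results(dds, name="condition_B_vs_A")
-# condition effect for genotype III: main effect plus interaction
-results(dds, contrast=list(c("condition_B_vs_A","genotypeIII.conditionB")))
-# difference between the condition effect for genotype III and genotype I:
-results(dds, name="genotypeIII.conditionB")
-@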
-
-\subsection{Time-series experiments}
-
-There are a number of ways to analyze time-series experiments,
-depending on the biological question of interest. In order to test for
-any differences over multiple time points, one can use a design
-including the time factor, and then test using the likelihood ratio
-test as described in Section~\ref{sec:LRT}, where the time factor is
-removed in the reduced formula. For a control and treatment time
-series, one can use a design formula containing the condition factor,
-the time factor, and the interaction of the two. In this case, using
-the likelihood ratio test with a reduced model which does not contain
-the interaction terms will test whether the condition induces a change
-in gene expression at any time point after the reference level time point
-(time 0). An example of the latter analysis is provided in an RNA-seq
-workflow on the Bioconductor
-website: \url{http://www.bioconductor.org/help/workflows/rnaseqGene/}.
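-
-For the control and treatment time series described above, a minimal
-sketch (assuming columns \Robject{condition} and \Robject{time} exist
-in \Robject{colData(dds)}) would be:
-
-<<timeSeriesLRT, eval=FALSE>>=
-# full model with interaction; the LRT drops the interaction terms
-design(dds) <- ~ condition + time + condition:time
-dds <- DESeq(dds, test="LRT", reduced = ~ condition + time)
-res <- results(dds)
-@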
-
-\subsection{Likelihood ratio test} \label{sec:LRT}
-
-\deseqtwo{} offers two kinds of hypothesis tests: the Wald test, where
-we use the estimated standard error of a log2 fold change to test if it is
-equal to zero, and the likelihood ratio test (LRT). The LRT examines
-two models for the counts, a \emph{full} model with a certain number
-of terms and a \emph{reduced} model, in which some of the terms of the
-\emph{full} model are removed. The test determines if the increased
-likelihood of the data using the extra terms in the \emph{full} model
-is more than expected if those extra terms are truly zero.
-
-The LRT is therefore useful for testing multiple
-terms at once, for example testing 3 or more levels of a factor at once,
-or all interactions between two variables. 
-The LRT for count data is conceptually similar to an analysis of variance (ANOVA)
-calculation in linear regression, except that in the case of the Negative
-Binomial GLM, we use an analysis of deviance (ANODEV), where the
-\emph{deviance} captures the difference in likelihood between a full
-and a reduced model.
-
-The likelihood ratio test can be performed by specifying \Rcode{test="LRT"}
-when using the \Rfunction{DESeq} function, and
-providing a reduced design formula, e.g. one in which a
-number of terms from \Robject{design(dds)} are removed.
-The degrees of freedom for the test are obtained from the difference
-between the number of parameters in the two models. 
-A simple likelihood ratio test, if the full design was
-\Robject{~condition}, would look like:
-
-<<simpleLRT, eval=FALSE>>=
-dds <- DESeq(dds, test="LRT", reduced=~1)
-res <- results(dds)
-@ 
-
-If the full design contained other variables, 
-such as a batch variable,
-then the likelihood ratio test would look like:
-
-<<simpleLRT2, eval=FALSE>>=
-dds <- DESeq(dds, test="LRT", reduced=~batch)
-res <- results(dds)
-@ 
-
-\subsection{Approach to count outliers} \label{sec:outlierApproach}
-
-RNA-seq data sometimes contain isolated instances of very large counts that are apparently
-unrelated to the experimental or study design, and which may be 
-considered outliers. There are many reasons why outliers can arise, including rare
-technical or experimental artifacts, read mapping problems in the case of genetically
-differing samples, and genuine, but rare biological events. In many cases, users appear
-to be primarily interested in genes that show consistent behavior, and this is the reason
-why, by default, genes that are affected by such outliers are set aside by \deseqtwo{}, 
-or if there are sufficient samples, outlier counts are replaced for model fitting. 
-These two behaviors are described below.
-
-The \Rfunction{DESeq} function calculates, for every gene and for every sample,
-a diagnostic test for outliers called \emph{Cook's distance}. Cook's distance 
-is a measure of how much a single sample is influencing the fitted 
-coefficients for a gene, and a large value of Cook's distance is 
-intended to indicate an outlier count. 
-The Cook's distances are stored as a matrix available in 
-\Robject{assays(dds)[["cooks"]]}.
-
-The \Rfunction{results} function automatically flags genes which contain a 
-Cook's distance above a cutoff for samples which have 3 or more replicates. 
-The $p$ values and adjusted $p$ values for these genes are set to \Robject{NA}. 
-At least 3 replicates are required for flagging, as it is difficult to judge
-which sample might be an outlier with only 2 replicates.
-This filtering can be turned off with \Rcode{results(dds, cooksCutoff=FALSE)}.
-
-With many degrees of freedom -- i.\,e., many more samples than number of parameters to 
-be estimated -- it is undesirable to remove entire genes from the analysis
-just because their data include a single count outlier. When there
-are 7 or more replicates for a given sample, the \Rfunction{DESeq}
-function will automatically replace counts with large Cook's distance 
-with the trimmed mean over all samples, scaled up by the size factor or 
-normalization factor for that sample. This approach is conservative:
-it will not lead to false positives, as it replaces
-the outlier value with the value predicted by the null hypothesis.
-This outlier replacement only occurs when there are 7 or more
-replicates, and can be turned off with 
-\Rcode{DESeq(dds, minReplicatesForReplace=Inf)}.
-
-The default Cook's distance cutoff for the two behaviors described above
-depends on the sample size and number of parameters
-to be estimated. The default is to use the $99\%$ quantile of the 
-$F(p,m-p)$ distribution (with $p$ the number of parameters including the 
-intercept and $m$ number of samples).
-The default for gene flagging can be modified using the \Robject{cooksCutoff} 
-argument to the \Rfunction{results} function. 
-For outlier replacement, \Rfunction{DESeq} preserves the original counts in
-\Robject{counts(dds)}, saving the replacement counts as a matrix named
-\Robject{replaceCounts} in \Robject{assays(dds)}.
-Note that with continuous variables in the design, outlier detection
-and replacement is not automatically performed, as our 
-current methods involve a robust estimation of within-group variance
-which does not extend easily to continuous covariates. However, users
-can examine the Cook's distances in \Rcode{assays(dds)[["cooks"]]}, in
-order to perform manual visualization and filtering if necessary.
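-
-As a sketch, the default cutoff described above can be computed
-manually as follows (here the value of \Robject{p} is an assumption
-and must match the number of coefficients in the fitted model):
-
-<<cooksCutoffSketch, eval=FALSE>>=
-m <- ncol(dds)                  # number of samples
-p <- length(resultsNames(dds))  # number of parameters, incl. intercept
-qf(.99, p, m - p)               # default Cook's distance cutoff
-@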
-
-\textbf{Note on many outliers:} if there are very many outliers 
-(e.g. many hundreds or thousands) reported by
-\Rcode{summary(res)}, one might consider further exploration to see if
-a single sample or a few samples should be removed due to low quality. 
-The automatic outlier filtering/replacement is most useful in situations in which the number
-of outliers is limited. When there are thousands of reported outliers, 
-it might make more sense to turn off the outlier filtering/replacement
-(\Rfunction{DESeq} with \Robject{minReplicatesForReplace=Inf} and
-\Rfunction{results} with \Robject{cooksCutoff=FALSE})
-and perform manual inspection: First it would be
-advantageous to make a PCA plot using the code example in Section
-\ref{sec:pca} to spot individual sample outliers; Second, one can make
-a boxplot of the Cook's distances to see if one sample is consistently
-higher than others: 
-
-<<boxplotCooks, fig.show="asis", fig.small=TRUE, fig.cap="Boxplot of Cook's distances.  Here we can look to see if one sample has much higher Cook's distances than the other samples. In this case, the samples all have comparable range of Cook's distances.\\label{figure/boxplotCooks-1}">>=
-par(mar=c(8,5,2,2))
-boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
-@ 
-
-\subsection{Dispersion plot and fitting alternatives}
-
-Plotting the dispersion estimates is a useful diagnostic. The dispersion
-plot in Figure \ref{figure/dispFit-1} is typical, with the final estimates shrunk
-from the gene-wise estimates towards the fitted estimates. Some gene-wise
-estimates are flagged as outliers and not shrunk towards the fitted value
-(this outlier detection is described in the manual page for \Rfunction{estimateDispersionsMAP}).
-The amount of shrinkage can be more or less than seen here, depending 
-on the sample size, the number of coefficients, the row mean
-and the variability of the gene-wise estimates.
-
-<<dispFit, fig.show="asis", fig.small=TRUE, fig.cap="Dispersion plot.  The dispersion estimate plot shows the gene-wise estimates (black), the fitted values (red), and the final maximum \\textit{a posteriori} estimates used in testing (blue).\\label{figure/dispFit-1}">>=
-plotDispEsts(dds)
-@
-
-\subsubsection{Local or mean dispersion fit}
-
-A local smoothed dispersion fit is automatically substituted in the case that
-the parametric curve does not fit the observed dispersion-mean relationship.
-This can be prespecified by providing the argument
-\Robject{fitType="local"} to either \Rfunction{DESeq} or \Rfunction{estimateDispersions}.
-Additionally, using the mean of gene-wise dispersion estimates as the
-fitted value can be specified by providing the argument \Robject{fitType="mean"}. 
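-
-A minimal sketch of prespecifying the fit type:
-
-<<fitTypeSketch, eval=FALSE>>=
-dds <- DESeq(dds, fitType="local")  # local regression fit
-dds <- DESeq(dds, fitType="mean")   # mean of gene-wise estimates
-@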
-
-\subsubsection{Supply a custom dispersion fit}
-
-Any fitted values can be provided during dispersion estimation, using
-the lower-level functions described in the manual page for
-\Rfunction{estimateDispersionsGeneEst}. In the code chunk below, we
-store the gene-wise estimates which were already calculated and saved 
-in the metadata column \Robject{dispGeneEst}. Then we calculate the
-median value of the dispersion estimates above a threshold, and save
-these values as the fitted dispersions, using the replacement function
-for \Rfunction{dispersionFunction}. In the last line, the function
-\Rfunction{estimateDispersionsMAP} uses the 
-fitted dispersions to generate maximum \textit{a posteriori} (MAP)
-estimates of dispersion. 
-
-<<dispFitCustom>>=
-ddsCustom <- dds
-useForMedian <- mcols(ddsCustom)$dispGeneEst > 1e-7
-medianDisp <- median(mcols(ddsCustom)$dispGeneEst[useForMedian],
-                     na.rm=TRUE)
-dispersionFunction(ddsCustom) <- function(mu) medianDisp
-ddsCustom <- estimateDispersionsMAP(ddsCustom)
-@
-
-
-\subsection{Independent filtering of results}\label{sec:autoFilt}
-
-The \Rfunction{results} function of the \deseqtwo{} package 
-performs independent filtering by default using 
-the mean of normalized counts as a filter statistic. 
-A threshold on the filter statistic is found which optimizes the number
-of adjusted $p$ values lower than a significance level \Robject{alpha}
-(we use the standard variable name for significance level, 
-though it is unrelated to the dispersion parameter $\alpha$). 
-The theory behind independent filtering is discussed in greater detail
-in Section~\ref{sec:indepfilt}. The adjusted $p$ values for the genes
-which do not pass the filter threshold are set to \Robject{NA}.
-
-The independent filtering is performed using the \Rfunction{filtered\_p} function 
-of the \Biocpkg{genefilter} package, and all of the arguments of \Rfunction{filtered\_p}
-can be passed to the \Rfunction{results} function. 
-The filter threshold value and the number of rejections at each quantile
-of the filter statistic are available as metadata of the object 
-returned by \Rfunction{results}. For example, we can visualize
-the optimization by plotting the \Robject{filterNumRej} attribute of 
-the results object, as seen in Figure \ref{figure/filtByMean-1}.
-
-<<filtByMean, dev="pdf", fig.show="asis", fig.small=TRUE, fig.cap="Independent filtering.  The \\Rfunction{results} function maximizes the number of rejections (adjusted $p$ value less than a significance level), over the quantiles of a filter statistic (the mean of normalized counts). The threshold chosen (vertical line) is the lowest quantile of the filter for which the number of rejections is within 1 residual standard deviation to the peak of a curve fit to the number of rejections over the filter quantiles.\\label{figure/filtByMean-1}">>=
-metadata(res)$alpha
-metadata(res)$filterThreshold
-plot(metadata(res)$filterNumRej, 
-     type="b", ylab="number of rejections",
-     xlab="quantiles of filter")
-lines(metadata(res)$lo.fit, col="red")
-abline(v=metadata(res)$filterTheta)
-@
-
-Independent filtering can be turned off by setting 
-\Robject{independentFiltering} to \Robject{FALSE}.
-
-<<noFilt>>=
-resNoFilt <- results(dds, independentFiltering=FALSE)
-addmargins(table(filtering=(res$padj < .1),
-                 noFiltering=(resNoFilt$padj < .1)))
-@ 
-
-\subsection{Tests of log2 fold change above or below a threshold}
-
-It is also possible to provide thresholds for constructing
-Wald tests of significance. Two arguments to the \Rfunction{results}
-function allow for threshold-based Wald tests: \Robject{lfcThreshold},
-which takes a non-negative numeric threshold value, 
-and \Robject{altHypothesis}, which specifies the kind of test.
-Note that the \textit{alternative hypothesis} is specified by the user, 
-i.e. those genes which the user is interested in finding, and the test 
-provides $p$ values for the null hypothesis, the complement of the set 
-defined by the alternative. The \Robject{altHypothesis} argument can take one 
-of the following four values, where $\beta$ is the log2 fold change
-specified by the \Robject{name} argument:
-
-\begin{itemize}
- \item \Robject{greaterAbs} - $|\beta| > \textrm{lfcThreshold}$ - tests are two-tailed
- \item \Robject{lessAbs} - $|\beta| < \textrm{lfcThreshold}$ - $p$ values are the maximum of the upper and lower tests
- \item \Robject{greater} - $\beta > \textrm{lfcThreshold} $
- \item \Robject{less} - $\beta < -\textrm{lfcThreshold} $
-\end{itemize}
-
-The test \Robject{altHypothesis="lessAbs"} requires that the user have
-run \Rfunction{DESeq} with the argument \Robject{betaPrior=FALSE}.  To
-understand the reason for this requirement, consider that during
-hypothesis testing, the null hypothesis is favored unless the data
-provide strong evidence to reject the null.  For this test, including
-a zero-centered prior on log fold change would favor the alternative
-hypothesis, shrinking log fold changes toward zero.  Removing the
-prior on log fold changes for tests of small log fold change allows
-for detection of only those genes where the data alone provides
-evidence against the null.
-
-The four possible values of \Robject{altHypothesis} are demonstrated
-in the following code and visually by MA-plots in Figure~\ref{figure/lfcThresh-1}. 
-First we run \Rfunction{DESeq} and specify \Robject{betaPrior=FALSE} in order 
-to demonstrate \Robject{altHypothesis="lessAbs"}.
-
-<<ddsNoPrior>>=
-ddsNoPrior <- DESeq(dds, betaPrior=FALSE)
-@
-
-In order to produce results tables for the following tests, the same arguments
-(except \Robject{ylim}) would be provided to the \Rfunction{results} function. 
-
-<<lfcThresh, fig.show="asis", fig.cap='MA-plots of tests of log2 fold change with respect to a threshold value.  Going left to right across rows, the tests are for \\Robject{altHypothesis = "greaterAbs"}, \\Robject{"lessAbs"}, \\Robject{"greater"}, and \\Robject{"less"}.\\label{figure/lfcThresh-1}'>>=
-par(mfrow=c(2,2),mar=c(2,2,1,1))
-yl <- c(-2.5,2.5)
-
-resGA <- results(dds, lfcThreshold=.5, altHypothesis="greaterAbs")
-resLA <- results(ddsNoPrior, lfcThreshold=.5, altHypothesis="lessAbs")
-resG <- results(dds, lfcThreshold=.5, altHypothesis="greater")
-resL <- results(dds, lfcThreshold=.5, altHypothesis="less")
-
-plotMA(resGA, ylim=yl)
-abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
-plotMA(resLA, ylim=yl)
-abline(h=c(-.5,.5),col="dodgerblue",lwd=2)
-plotMA(resG, ylim=yl)
-abline(h=.5,col="dodgerblue",lwd=2)
-plotMA(resL, ylim=yl)
-abline(h=-.5,col="dodgerblue",lwd=2)
-@ 
-
-\subsection{Access to all calculated values}\label{sec:access}
-
-All row-wise calculated values (intermediate dispersion calculations,
-coefficients, standard errors, etc.) are stored in the \Rclass{DESeqDataSet} 
-object, e.g. \Robject{dds} in this vignette. These values are accessible 
-by calling \Rfunction{mcols} on \Robject{dds}. 
-Descriptions of the columns are accessible by two calls to 
-\Rfunction{mcols}.
-
-<<mcols>>=
-mcols(dds,use.names=TRUE)[1:4,1:4]
-# here using substr() only for display purposes
-substr(names(mcols(dds)),1,10) 
-mcols(mcols(dds), use.names=TRUE)[1:4,]
-@
-
-The mean values $\mu_{ij} = s_j q_{ij}$ and the Cook's distances for each gene and
-sample are stored as matrices in the assays slot:
-
-<<muAndCooks>>=
-head(assays(dds)[["mu"]])
-head(assays(dds)[["cooks"]])
-@ 
-
-The dispersions $\alpha_i$ can be accessed with the
-\Rfunction{dispersions} function.
-
-<<dispersions>>=
-head(dispersions(dds))
-# which is the same as 
-head(mcols(dds)$dispersion)
-@ 
-
-The size factors $s_j$ are accessible via \Rfunction{sizeFactors}:
-
-<<sizefactors>>=
-sizeFactors(dds)
-@ 
-
-For advanced users, we also include a convenience function \Rfunction{coef} for 
-extracting the matrix of coefficients $[\beta_{ir}]$ for all genes $i$ and
-parameters $r$, as in the formula in Section~\ref{sec:glm}.
-This function can also return a matrix of standard errors, see \Robject{?coef}.
-The columns of this matrix correspond to the effects returned by \Rfunction{resultsNames}.
-Note that the \Rfunction{results} function is best for building 
-results tables with $p$ values and adjusted $p$ values.
-
-<<coef>>=
-head(coef(dds))
-@ 
-
-The beta prior variance $\sigma_r^2$ is stored as an attribute of the
-\Rclass{DESeqDataSet}: 
-
-<<betaPriorVar>>=
-attr(dds, "betaPriorVar")
-@ 
-
-The dispersion prior variance $\sigma_d^2$ is stored as an
-attribute of the dispersion function:
-
-<<dispPriorVar>>=
-dispersionFunction(dds)
-attr(dispersionFunction(dds), "dispPriorVar")
-@ 
-
-The version of \deseqtwo{} which was used to construct the
-\Rclass{DESeqDataSet} object, or the version used when
-\Rfunction{DESeq} was run, is stored here:
-
-<<versionNum>>=
-metadata(dds)[["version"]]
-@ 
-
-\subsection{Sample-/gene-dependent normalization factors} \label{sec:normfactors}
-
-In some experiments, there might be gene-dependent biases
-which vary across samples. For instance, GC-content bias or length
-bias might vary across samples coming from different labs or
-processed at different times. We use the terms ``normalization factors''
-for a gene $\times$ sample matrix, and ``size factors'' for a
-single number per sample.  Incorporating normalization factors,
-the mean parameter $\mu_{ij}$ from Section~\ref{sec:glm} becomes:
-
-$$ \mu_{ij} = NF_{ij} q_{ij} $$
-
-with normalization factor matrix $NF$ having the same dimensions
-as the counts matrix $K$. This matrix can be incorporated as shown
-below. We recommend providing a matrix with row-wise geometric means of $1$, 
-so that the mean of normalized counts for a gene is close to the mean
-of the unnormalized counts.
-This can be accomplished by dividing out the current row geometric means.
-
-<<normFactors, eval=FALSE>>=
-# assuming normFactors is a gene x sample matrix of positive values,
-# divide out the row-wise geometric means so each row has geometric mean 1
-normFactors <- normFactors / exp(rowMeans(log(normFactors)))
-normalizationFactors(dds) <- normFactors
-@
-
-These steps then replace \Rfunction{estimateSizeFactors} in the steps
-described in Section~\ref{sec:steps}. Normalization factors, if present,
-will always be used in the place of size factors.
-
-The methods provided by the \Biocpkg{cqn} or \Biocpkg{EDASeq} packages
-can help correct for GC or length biases. They both describe in their
-vignettes how to create matrices which can be used by \deseqtwo{}.
-From the formula above, we see that normalization factors should be on
-the scale of the counts, like size factors, and unlike offsets which
-are typically on the scale of the predictors (i.e. the logarithmic scale for
-the negative binomial GLM). At the time of writing, the transformation
-from the matrices provided by these packages should be:
-
-<<offsetTransform, eval=FALSE>>=
-cqnOffset <- cqnObject$glm.offset
-cqnNormFactors <- exp(cqnOffset)
-EDASeqNormFactors <- exp(-1 * EDASeqOffset)
-@
-
-\subsection{``Model matrix not full rank''}
-
-While most experimental designs run easily using a design formula, some
-design formulas can cause problems and result in the \Rfunction{DESeq}
-function returning an error with the text: ``the model matrix is not
-full rank, so the model cannot be fit as specified.''  There are two
-main reasons for this problem: either one or more columns in the model
-matrix are linear combinations of other columns, or there are levels
-of factors or combinations of levels of multiple factors which are
-missing samples. We address these two problems below and discuss
-possible solutions:
-
-\subsubsection{Linear combinations}
-
-The simplest case is the linear combination, or linear dependency
-problem, when two variables contain exactly the same information, such
-as in the following sample table. The software cannot fit an effect
-for \Robject{batch} and \Robject{condition}, because they produce
-identical columns in the model matrix. This is also referred to as
-``perfect confounding''. A unique solution of coefficients (the $\beta_i$ in
-the formula in Section~\ref{sec:glm}) is not possible.
-
-<<lineardep, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,2,2)), condition=factor(c("A","A","B","B")))
-@ 
-
-Another situation which will cause problems is when the variables are
-not identical, but one variable can be formed by the combination of
-other factor levels. In the following example, the effect of batch 2
-vs 1 cannot be fit because it is identical to a column in the model
-matrix which represents the condition C vs A effect.
-
-<<lineardep2, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,1,1,2,2)), condition=factor(c("A","A","B","B","C","C")))
-@ 
-
-In both of these cases above, the batch effect cannot be fit and must
-be removed from the model formula. There is just no way to tell apart
-the condition effects and the batch effects. The options are either to assume
-there is no batch effect (which we know is highly unlikely given the
-literature on batch effects in sequencing datasets) or to repeat the
-experiment and properly balance the conditions across batches.
-A balanced design would look like:
-
-<<lineardep3, echo=FALSE>>=
-data.frame(batch=factor(c(1,1,1,2,2,2)), condition=factor(c("A","B","C","A","B","C")))
-@ 
-
-Finally, there is a case where we can in fact perform inference.
-Consider an experiment with grouped individuals,
-where we seek to test the group-specific effect of a treatment, while
-controlling for individual effects. A simple example of such a design is:
-
-<<groupeffect>>=
-(coldata <- data.frame(grp=factor(rep(c("X","Y"),each=4)),
-                       ind=factor(rep(1:4,each=2)),
-                       cnd=factor(rep(c("A","B"),4))))
-@
-
-
-This design can be analyzed by \deseqtwo{} but requires a bit of
-refactoring in order to fit the model terms. Here we will use a trick
-described in the \Biocpkg{edgeR} user guide, from the section
-``Comparisons Both Between and Within Subjects''.  If we try to
-analyze with a formula such as \Rcode{$\sim$ ind + grp*cnd}, we will
-obtain an error, because the effect for group is a linear combination
-of the individuals.
-
-However, the following steps allow for an analysis of group-specific
-condition effects, while controlling for differences in individual.
-For object construction, use a dummy design, such as \Rcode{$\sim$
-  1}. Then add a column \Robject{ind.n} which distinguishes the
-individuals ``nested'' within a group. Here, we add this column to
-coldata, but in practice you would add this column to \Rcode{dds}.
-
-<<groupeffect2>>=
-coldata$ind.n <- factor(rep(rep(1:2,each=2),2))
-coldata
-@ 
-
-Now we can reassign our \Rclass{DESeqDataSet} a design of
-\Rcode{$\sim$ grp + grp:ind.n + grp:cnd}, before we call
-\Rfunction{DESeq}. This new design will result in the following model
-matrix: 
-
-<<groupeffect3>>=
-model.matrix(~ grp + grp:ind.n + grp:cnd, coldata)
-@ 
-
-where the terms \Robject{grpX.cndB} and \Robject{grpY.cndB} give the
-group-specific condition effects. These can be extracted using
-\Rfunction{results} with the \Robject{name} argument.
-Furthermore, \Robject{grpX.cndB} and
-\Robject{grpY.cndB} can be contrasted using the \Robject{contrast}
-argument, in order to test if the condition effect is different across group:
-
-<<groupeffect4, eval=FALSE>>=
-results(dds, contrast=list("grpY.cndB","grpX.cndB"))
-@ 
-
-\subsubsection{Levels without samples}
-
-The base R function for creating model matrices will produce a column
-of zeros if a level is missing from a factor or a combination of
-levels is missing from an interaction of factors. The solution to the
-first case is to call \Rfunction{droplevels} on the column, which will
-remove levels without samples. This was shown in the beginning of this
-vignette.
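-
-A minimal sketch for a hypothetical factor column \Robject{condition}:
-
-<<droplevelsSketch, eval=FALSE>>=
-# remove levels without samples from the factor
-dds$condition <- droplevels(dds$condition)
-@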
-
-The second case is also solvable, by manually editing the model
-matrix, and then providing this to \Rfunction{DESeq}. Here we
-construct an example dataset to illustrate:
-
-<<missingcombo>>=
-group <- factor(rep(1:3,each=6))
-condition <- factor(rep(rep(c("A","B","C"),each=2),3))
-(d <- data.frame(group, condition)[-c(17,18),])
-@ 
-
-Note that if we try to estimate all interaction terms, we introduce a
-column with all zeros, as there are no condition C samples for group
-3. (Here, \Rfunction{unname} is used to display the matrix concisely.)
-
-<<missingcombo2>>=
-m1 <- model.matrix(~ condition*group, d)
-colnames(m1)
-unname(m1)
-@ 
-
-We can remove this column like so:
-
-<<missingcombo3>>=
-m1 <- m1[,-9]
-unname(m1)
-@ 
-
-Now this matrix \Robject{m1} can be provided to the \Robject{full}
-argument of \Rfunction{DESeq}.  For a likelihood ratio test of
-interactions, a model matrix using a reduced design such as
-\Rcode{$\sim$ condition + group} can be given to the \Robject{reduced}
-argument. Wald tests can also be generated instead of the likelihood
-ratio test, but for user-supplied model matrices, the argument
-\Robject{betaPrior} must be set to \Robject{FALSE}.
-
-\newpage
-
-%--------------------------------------------------
-\section{Theory behind DESeq2}
-%--------------------------------------------------
-  
-\subsection{The DESeq2 model} \label{sec:glm}
-
-The \deseqtwo{} model and all the steps taken in the software
-are described in detail in our publication \cite{Love2014},
-and we include the formula and descriptions in this section as well.
-The differential expression analysis in \deseqtwo{} uses a generalized
-linear model of the form:
-
-$$ K_{ij} \sim \textrm{NB}(\mu_{ij}, \alpha_i) $$
-$$ \mu_{ij} = s_j q_{ij} $$
-$$ \log_2(q_{ij}) = x_{j.} \beta_i $$
-
-where counts $K_{ij}$ for gene $i$, sample $j$ are modeled using
-a negative binomial distribution with fitted mean $\mu_{ij}$
-and a gene-specific dispersion parameter $\alpha_i$.
-The fitted mean is composed of a sample-specific size factor
-$s_j$\footnote{The model can be generalized to use sample- 
-\textbf{and} gene-dependent normalization factors, see
-Appendix~\ref{sec:normfactors}.} and a parameter $q_{ij}$ 
-proportional to the expected true concentration of fragments for sample $j$.
-The coefficients $\beta_i$ give the log2 fold changes for gene $i$ for each 
-column of the model matrix $X$. 
-
-The dispersion parameter $\alpha_i$ defines the relationship between
-the variance of the observed count and its mean value. In other
-words, how far we expect the observed count to be from the
-mean value, which depends both on the size factor $s_j$ and the
-covariate-dependent part $q_{ij}$ as defined above.
-
-$$ \textrm{Var}(K_{ij}) = E[ (K_{ij} - \mu_{ij})^2 ] = \mu_{ij} + \alpha_i \mu_{ij}^2 $$
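-
-As a quick sketch, this mean-variance relationship can be checked
-empirically by simulation (the values of $\mu$ and $\alpha$ below are
-arbitrary):
-
-<<nbVarSketch, eval=FALSE>>=
-mu <- 100; alpha <- 0.05
-k <- rnbinom(1e5, mu=mu, size=1/alpha)  # NB with dispersion alpha
-c(empirical=var(k), theoretical=mu + alpha * mu^2)
-@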
-
-The log2 fold changes in $\beta_i$ are the maximum \emph{a posteriori}
-estimates after incorporating a 
-zero-centered Normal prior -- in the software referred to as a $\beta$-prior -- hence \deseqtwo{}
-provides ``moderated'' log2 fold change estimates.  Dispersions are estimated using expected mean
-values from the maximum likelihood estimate of log2 fold changes, and optimizing the Cox-Reid
-adjusted profile likelihood, as first implemented for RNA-seq data in \Biocpkg{edgeR}
-\cite{CR,edgeR_GLM}. The steps performed by the \Rfunction{DESeq} function are documented in its
-manual page; briefly, they are:
-
-\begin{enumerate}
-\item estimation of size factors $s_j$ by \Rfunction{estimateSizeFactors}
-\item estimation of dispersion $\alpha_i$ by \Rfunction{estimateDispersions}
-\item negative binomial GLM fitting for $\beta_i$ and Wald statistics by 
-\Rfunction{nbinomWaldTest}
-\end{enumerate}
-
-For access to all the values calculated during these steps,
-see Section~\ref{sec:access}.
-
-\subsection{Changes compared to the  \Biocpkg{DESeq} package}
-
-The main changes in the package \deseqtwo{}, compared to the (older)
-version \Biocpkg{DESeq}, are as follows:
-
-\begin{itemize}
-\item \Rclass{RangedSummarizedExperiment} is used as the superclass for storage of input data,
-  intermediate calculations and results.
-\item Maximum \textit{a posteriori} estimation of GLM coefficients
-  incorporating a zero-centered
-  Normal prior with variance estimated from data (equivalent to Tikhonov/ridge
-  regularization). This adjustment has little effect on genes with high counts, yet it
-  helps to moderate the otherwise large variance in log2 fold change estimates
-  for genes with low counts or highly variable counts.
-\item Maximum \textit{a posteriori} estimation of dispersion replaces the
-  \Robject{sharingMode} options \Robject{fit-only} or \Robject{maximum} of the previous version
-  of the package. This is similar to the dispersion estimation methods of DSS \cite{Wu2012New}.
-\item All estimation and inference is based on the generalized linear model, which
-  includes the two condition case (previously the \textit{exact test} was used).
-\item The Wald test for significance of GLM coefficients is provided as the default
-  inference method, with the likelihood ratio test of the previous version still available.
-\item It is possible to provide a matrix of sample-/gene-dependent
-  normalization factors (Section \ref{sec:normfactors}).
-\item Automatic independent filtering on the mean of normalized counts
-  (Section \ref{sec:indepfilt}).
-\item Automatic outlier detection and handling (Section \ref{sec:cooks}).
-\end{itemize}
-
-\subsection{Methods changes since the 2014 DESeq2 paper}
-
-\begin{itemize}
-  \item For the calculation of the beta prior variance, instead of
-    matching the empirical quantile to the quantile of a Normal
-    distribution, \deseqtwo{} now uses the weighted quantile function
-    of the \CRANpkg{Hmisc} package. The weighting is described in the
-    manual page for \Rfunction{nbinomWaldTest}.  The weights are the
-    inverse of the expected variance of log counts (as used in the
-    diagonals of the matrix $W$ in the GLM). The effect of the change
-    is that the estimated prior variance is robust against noisy
-    estimates of log fold change from genes with very small
-    counts. This change was introduced in version 1.6 (October 2014).
-  \item For designs with interaction terms, the solution described in
-    the paper is no longer used (log fold change shrinkage only
-    applied to interaction terms). Instead, \deseqtwo{} now turns off
-    log fold change shrinkage for all terms if an interaction term is
-    present (\Robject{betaPrior=FALSE}).  While the inference on
-    interaction terms was correct with \Robject{betaPrior=TRUE}, the
-    interpretation of the individual terms and the extraction of
-    contrasts was too confusing.  This change was introduced in version 1.10
-    (October 2015).
-  \item A small change to the independent filtering routine: instead
-    of taking the quantile of the filter (the mean of normalized counts) which
-    directly \textit{maximizes} the number of rejections, the threshold chosen is 
-    the lowest quantile of the filter for which the
-    number of rejections is close to the peak of a curve fit
-    to the number of rejections over the filter quantiles.
-    ``Close to'' is defined as within 1 residual standard deviation.
-    This change was introduced in version 1.10 (October 2015).
-\end{itemize}
-
-For a list of all changes since version 1.0.0, see the NEWS file
-included in the package.
-
-\subsection{Count outlier detection} \label{sec:cooks}
-
-\deseqtwo{} relies on the negative binomial distribution to make
-estimates and perform statistical inference on differences.  While the
-negative binomial is versatile in having a mean and dispersion
-parameter, extreme counts in individual samples might not fit well to
-the negative binomial. For this reason, we perform automatic detection
-of count outliers. We use Cook's distance, which is a measure of how
-much the fitted coefficients would change if an individual sample were
-removed \cite{Cook1977Detection}. For more on the implementation of 
-Cook's distance see Section~\ref{sec:outlierApproach} and the manual page
-for the \Rfunction{results} function. Below we plot the maximum value of
-Cook's distance for each row over the rank of the test statistic 
-to justify its use as a filtering criterion.
-
-<<cooksPlot, fig.show="asis", fig.small=TRUE, fig.cap="Cook's distance.  Plot of the maximum Cook's distance per gene over the rank of the Wald statistics for the condition. The two regions with small Cook's distances are genes with a single count in one sample. The horizontal line is the default cutoff used for 7 samples and 3 estimated parameters.\\label{figure/cooksPlot-1}">>=
-W <- res$stat
-maxCooks <- apply(assays(dds)[["cooks"]],1,max)
-idx <- !is.na(W)
-plot(rank(W[idx]), maxCooks[idx], xlab="rank of Wald statistic", 
-     ylab="maximum Cook's distance per gene",
-     ylim=c(0,5), cex=.4, col=rgb(0,0,0,.3))
-m <- ncol(dds)
-p <- 3
-abline(h=qf(.99, p, m - p))
-@ 
-
-\subsection{Contrasts} \label{sec:ctrstTheory}
-
-Contrasts can be calculated for a \Rclass{DESeqDataSet} object for which
-the GLM coefficients have already been fit using the Wald test steps
-(\Rfunction{DESeq} with \texttt{test="Wald"} or using \Rfunction{nbinomWaldTest}).
-The vector of coefficients $\beta$ is left multiplied by the contrast vector $c$
-to form the numerator of the test statistic. The denominator is formed by multiplying
-the covariance matrix $\Sigma$ for the coefficients on either side by the 
-contrast vector $c$. The square root of this product is an estimate
-of the standard error for the contrast. The contrast statistic is then compared
-to a normal distribution as are the Wald statistics for the \deseqtwo{}
-package.
-
-$$ W = \frac{c^t \beta}{\sqrt{c^t \Sigma c}} $$
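-
-A minimal sketch of this calculation for a single gene, where
-\Robject{beta}, \Robject{Sigma} and \Robject{cc} are assumed to hold
-the coefficient vector, its covariance matrix and the contrast vector:
-
-<<contrastSketch, eval=FALSE>>=
-num <- as.numeric(t(cc) %*% beta)                # numerator: c^t beta
-den <- sqrt(as.numeric(t(cc) %*% Sigma %*% cc))  # SE of the contrast
-W <- num / den
-pvalue <- 2 * pnorm(-abs(W))    # compared to a normal distribution
-@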
-
-\subsection{Expanded model matrices} \label{sec:expanded}
-
-\deseqtwo{} uses ``expanded model matrices'' with the log2 fold change prior, 
-in order to produce shrunken log2 fold change estimates and test 
-results which are independent of the choice of reference level. 
-Another way of saying this is that the shrinkage is \textit{symmetric}
-with respect to all the levels of the factors in the design.
-The expanded model matrices differ from the standard model matrices, in that
-they have an indicator column (and therefore a coefficient) for
-each level of factors in the design formula in addition to an intercept. 
-Note that in version 1.10 and onward, standard model matrices are used for
-designs with interaction terms, as the shrinkage of log2 fold changes
-is not recommended for these designs.
-
-The expanded model matrices are not full rank, but a coefficient
-vector $\beta_i$ can still be found due to the zero-centered prior on
-non-intercept coefficients. The prior variance for the log2 fold
-changes is calculated by first generating maximum likelihood estimates
-for a standard model matrix. The prior variance for each level of a
-factor is then set as the average of the mean squared maximum
-likelihood estimates for each level and every possible contrast, such
-that this prior value will be reference-level-independent. The
-\Robject{contrast} argument of the \Rfunction{results} function is
-used in order to generate comparisons of interest.
-
-%--------------------------------------------------
-\subsection{Independent filtering and multiple testing} \label{sec:indepfilt}
-\subsubsection{Filtering criteria} \label{sec:filtbycount}
-%--------------------------------------------------
-
-The goal of independent filtering is to filter out those tests from the procedure 
-that have little or no chance of showing significant evidence, without even
-looking at their test statistic. Typically, this results in increased detection
-power at the same experiment-wide type I error. Here, we  measure experiment-wide
-type I error in terms of the false discovery rate.
-
-A good choice for a filtering criterion is one that
-\begin{enumerate}
-  \item\label{it:indp} is statistically independent from the test statistic under the null hypothesis,
-  \item\label{it:corr} is correlated with the test statistic under the alternative, and
-  \item\label{it:joint} does not notably change the dependence structure -- if there is any --
-    between the tests that pass the filter, compared to the dependence structure between the tests before filtering.
-\end{enumerate}
-
-The benefit from filtering relies on property \ref{it:corr}, and we will explore
-it further in Section~\ref{sec:whyitworks}. Its statistical validity relies on
-property \ref{it:indp} -- which is simple to formally prove for many combinations
-of filter criteria with test statistics -- and \ref{it:joint}, which is less
-easy to theoretically imply from first principles, but rarely a problem in practice.
-We refer to \cite{Bourgon:2010:PNAS} for further discussion of this topic.
-
-A simple filtering criterion readily available in the results object is the
-mean of normalized counts irrespective of biological condition (Figure \ref{figure/indFilt-1}),
-and so this is the criterion which is used automatically by the
-\Rfunction{results} function to perform independent filtering.
-Genes with very low counts are not likely to 
-see significant differences typically due to high
-dispersion. For example, we can plot the $-\log_{10}$ $p$ values from all genes
-over the normalized mean counts.
-
-<<indFilt, fig.show="asis", fig.small=TRUE, fig.cap="Mean counts as a filter statistic.  The mean of normalized counts provides an independent statistic for filtering the tests. It is independent because the information about the variables in the design formula is not used. By filtering out genes which fall on the left side of the plot, the majority of the low $p$ values are kept.\\label{figure/indFilt-1}">>=
-plot(res$baseMean+1, -log10(res$pvalue),
-     log="x", xlab="mean of normalized counts",
-     ylab=expression(-log[10](pvalue)),
-     ylim=c(0,30),
-     cex=.4, col=rgb(0,0,0,.3))
-@
-
-%--------------------------------------------------
-\subsubsection{Why does it work?}\label{sec:whyitworks}
-%--------------------------------------------------
-
-Consider the $p$ value histogram in Figure \ref{figure/fighistindepfilt-1}.
-It shows how the filtering ameliorates the multiple testing problem
--- and thus the severity of a multiple testing adjustment -- by
-removing a background set of hypotheses whose $p$ values are distributed
-more or less uniformly in $[0,1]$.
-
-<<histindepfilt, dev="pdf", fig.width=7, fig.height=5>>=
-use <- res$baseMean > metadata(res)$filterThreshold
-h1 <- hist(res$pvalue[!use], breaks=0:50/50, plot=FALSE)
-h2 <- hist(res$pvalue[use], breaks=0:50/50, plot=FALSE)
-colori <- c(`do not pass`="khaki", `pass`="powderblue")
-@ 
-
-<<fighistindepfilt, fig.show="asis", fig.small=TRUE, fig.cap="Histogram of p values for all tests.  The area shaded in blue indicates the subset of those that pass the filtering, the area in khaki those that do not pass.\\label{figure/fighistindepfilt-1}">>=
-barplot(height = rbind(h1$counts, h2$counts), beside = FALSE,
-        col = colori, space = 0, main = "", ylab="frequency")
-text(x = c(0, length(h1$counts)), y = 0, label = paste(c(0,1)),
-     adj = c(0.5,1.7), xpd=NA)
-legend("topright", fill=rev(colori), legend=rev(names(colori)))
-@
-
-\section{Frequently asked questions} \label{sec:faq}
-
-\subsection{How can I get support for DESeq2?}
-
-We welcome questions about our software, and want to
-ensure that we eliminate issues if and when they appear. We have a few
-requests to optimize the process:
-
-\begin{itemize}
-\item all questions should take place on the Bioconductor support
-  site: \url{https://support.bioconductor.org}, which serves as a
-  repository of questions and answers. This helps to save the
-  developers' time in responding to similar questions. Make sure to
-  tag your post with ``deseq2''. It is often very helpful in addition 
-  to describe the aim of your experiment.
-\item before posting, first search the Bioconductor support site
-  mentioned above for past threads which might have answered your
-  question.
-\item if you have a question about the behavior of a function, read
-  the sections of the manual page for this function by typing a
-  question mark and the function name, e.g. \Robject{?results}.  We
-  spend a lot of time documenting individual functions and the exact
-  steps that the software is performing.
-\item include all of your R code, especially the creation of the
-  \Rclass{DESeqDataSet} and the design formula.  Include complete
-  warning or error messages, and conclude your message with the full
-  output of \Robject{sessionInfo()}.
-\item if possible, include the output of
-  \Robject{as.data.frame(colData(dds))}, so that we can have a sense
-  of the experimental setup. If this contains confidential
-  information, you can replace the levels of those factors using
-  \Rfunction{levels()}.
-\end{itemize}
-
-\subsection{Why are some $p$ values set to \texttt{NA}?}
-  
-See the details in Section~\ref{sec:moreInfo}.  
-
-\subsection{How can I get unfiltered DESeq results?}
-
-Users can obtain unfiltered GLM results, i.e.\ without outlier removal
-or independent filtering, with the following call:
-
-<<vanillaDESeq, eval=FALSE>>=
-dds <- DESeq(dds, minReplicatesForReplace=Inf)
-res <- results(dds, cooksCutoff=FALSE, independentFiltering=FALSE)
-@
-
-In this case, the only $p$ values set to \Robject{NA} are those from
-genes with all counts equal to zero.
-
-\subsection{How do I use the variance stabilized or rlog 
-  transformed data for differential testing?}
-  
-  The variance stabilizing and rlog transformations are provided for
-  applications other than differential testing, for example clustering
-  of samples or other machine learning applications. For differential
-  testing we recommend the \Rfunction{DESeq} function applied to raw
-  counts as outlined in Section~\ref{sec:de}.
-      
-  
-\subsection{Can I use DESeq2 to analyze paired samples?}
-
-Yes, you should use a multi-factor design which includes the sample
-information as a term in the design formula. This will account for 
-differences between the samples while estimating the effect due to 
-the condition. The condition of interest should go at the end of the 
-design formula. See Section~\ref{sec:multifactor}.
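-
-For illustration, a minimal sketch, assuming \Rcode{colData(dds)}
-contains a factor \Rcode{subject} that pairs the samples:
-
-<<pairedDesign, eval=FALSE>>=
-## hypothetical paired design: `subject` pairs the samples,
-## `condition` is the effect of interest and goes last
-design(dds) <- ~ subject + condition
-dds <- DESeq(dds)
-@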
-
-\subsection{If I have multiple groups, should I run all together or split into pairs of groups?}
-
-Typically, we recommend that users run samples from all groups together, and then
-use the \Rcode{contrast} argument of the \Rfunction{results} function
-to extract comparisons of interest after fitting the model using \Rfunction{DESeq}.
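-
-For example, a minimal sketch of this workflow, assuming a factor
-\Rcode{condition} with levels A, B and C:
-
-<<multiGroupContrast, eval=FALSE>>=
-## fit all groups together, then extract pairwise comparisons
-dds <- DESeq(dds)
-res.AB <- results(dds, contrast=c("condition","B","A"))
-res.AC <- results(dds, contrast=c("condition","C","A"))
-@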
-
-The model fit by \Rfunction{DESeq} estimates a single dispersion
-parameter for each gene, which defines how far we expect the observed
-count for a sample to deviate from the mean value predicted by the model,
-given the sample's size factor and its condition group. See Section~\ref{sec:glm}
-and the \deseqtwo{} paper for full details.
-Having a single dispersion parameter for each gene is usually
-sufficient for analyzing multi-group data, as the final dispersion value will
-incorporate the within-group variability across all groups. 
-
-However, for some datasets, exploratory data analysis (EDA) plots as outlined
-in Section~\ref{sec:pca} could reveal that one or more groups has much
-higher within-group variability than the others. A simulated example
-of such a set of samples is shown in Figure~\ref{figure/varGroup-1}.
-This is a case where comparing groups A and B separately --
-subsetting a \Rclass{DESeqDataSet} to only samples from those two
-groups and then running \Rfunction{DESeq} on this subset -- will be
-more sensitive than fitting a model to all samples together.
-It should be noted that such an extreme range of within-group
-variability is not common, although it could arise if certain
-treatments produce an extreme reaction (e.g. cell death).
-Again, this can be easily detected from the EDA plots such as PCA
-described in this vignette.
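-
-A minimal sketch of such a subset analysis, assuming the groups are
-levels A, B and C of a factor \Rcode{condition}:
-
-<<subsetAB, eval=FALSE>>=
-## compare groups A and B alone, without the C samples
-dds.sub <- dds[, dds$condition %in% c("A","B")]
-dds.sub$condition <- droplevels(dds.sub$condition)
-dds.sub <- DESeq(dds.sub)
-@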
-
-<<varGroup, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Extreme range of within-group variability.  Typically, it is recommended to run \\Rfunction{DESeq} across samples from all groups, for datasets with multiple groups. However, this simulated dataset shows a case where it would be preferable to compare groups A and B by creating a smaller dataset without the C samples. Group C has much higher within-group variability, which would inflate the per-ge [...]
-set.seed(3)
-dds1 <- makeExampleDESeqDataSet(n=1000,m=12,betaSD=.3,dispMeanRel=function(x) 0.01)
-dds2 <- makeExampleDESeqDataSet(n=1000,m=12,
-                                betaSD=.3,
-                                interceptMean=mcols(dds1)$trueIntercept,
-                                interceptSD=0,
-                                dispMeanRel=function(x) 0.2)
-dds2 <- dds2[,7:12]
-dds2$condition <- rep("C",6)
-mcols(dds2) <- NULL
-dds <- cbind(dds1, dds2)
-rld <- rlog(dds, blind=FALSE, fitType="mean")
-plotPCA(rld)
-@ 
-
-\subsection{Can I run DESeq2 to contrast the levels of 100 groups?}
-
-\deseqtwo{} will work with any kind of design specified using the R
-formula. We encourage users to consider exploratory data analysis such
-as principal components analysis as described in Section~\ref{sec:pca}, 
-rather than performing statistical testing of all combinations of
-dozens of groups. 
-
-Regarding the speed of fitting very large models,
-note that each additional level of a factor in the
-design formula adds another parameter to the GLM which is fit by
-\deseqtwo. Users might consider first removing genes with very few
-reads, e.g.\ genes with a row sum of 1, as this will speed up the
-fitting procedure.
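-
-A minimal sketch of this pre-filtering step:
-
-<<preFilter, eval=FALSE>>=
-## keep only genes with a row sum greater than 1
-dds <- dds[ rowSums(counts(dds)) > 1, ]
-@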
-
-\subsection{Can I use DESeq2 to analyze a dataset without replicates?}
-
-If a \Rclass{DESeqDataSet} is provided with an experimental design
-without replicates, a warning is printed that the samples will be
-treated as replicates for estimation of dispersion. This kind of
-analysis is useful only for exploring the data; it cannot provide
-proper statistical inference on differences between groups, because
-without biological replicates it is not possible to estimate the
-biological variability of each gene.
-More details can be found in the manual page for \Rfunction{DESeq}.
-
-\subsection{How can I include a continuous covariate in the design formula?}
-
-Continuous covariates can be included in the design formula in the
-same manner as factorial covariates. Continuous covariates might make
-sense in certain experiments, where a constant fold change might be
-expected for each unit of the covariate.  However, in many cases, more
-meaningful results can be obtained by cutting continuous covariates
-into a factor defined over a small number of bins (e.g. 3-5).  In this
-way, the average effect of each group is controlled for, regardless of
-the trend over the continuous covariate.  In R, \Rclass{numeric}
-vectors can be converted into \Rclass{factors} using the function
-\Rfunction{cut}.
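-
-For illustration, a sketch assuming a hypothetical numeric column
-\Rcode{age} in the \Rcode{colData}:
-
-<<cutCovariate, eval=FALSE>>=
-## discretize the continuous covariate into 4 bins
-dds$ageBin <- cut(dds$age, breaks=4)
-design(dds) <- ~ ageBin + condition
-@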
-
-\subsection{Will the log fold change shrinkage ``overshrink'' large differences?}
-
-For most datasets, the application of a prior to the log fold changes
-is a good choice, providing log fold change estimates that are
-more stable across the entire range of mean counts than the maximum
-likelihood estimates (see Figure~\ref{fig:MA} and the \deseqtwo{} paper).
-One situation in which the prior on log fold changes might
-``overshrink'' the estimates is when nearly all genes show no
-difference across condition, a very small set of genes have extremely
-large differences, and there are no genes in between.
-A simulated example of such a dataset is shown in Figure~\ref{figure/overShrink-1}.
-This is not likely to be the case for most experiments, where typically
-there is a range of differences by size: some genes with medium-to-large
-differences across treatment, and some with small differences.
-
-<<overShrink, echo=FALSE, fig.width=5, fig.height=5, fig.show="asis", fig.small=TRUE, fig.cap="Example of a dataset with where the log fold change prior should be turned off.  Here we show a simulated MA-plot, where nearly all of the log fold changes are falling near the x-axis, with three genes that have very large log fold changes (note the y-axis is from -10 to 10 on the log2 scale). This would indicate a dataset where the log fold change prior would ``overshrink'' the large fold chan [...]
-plot(c(10^rnorm(1000, 3, 2),300,2000,5000), 
-     c(rnorm(1000, 0, .15), -5.5, -8.5, 7.5),
-     ylim=c(-10,10), log="x", cex=.4,
-     xlab="mean of normalized counts", 
-     ylab="log2 fold change")
-abline(h=0, col=rgb(1,0,0,.7))
-@ 
-
-There could be experiments in which only a few genes have
-very large log fold changes, and the rest of the genes are
-nearly constant across treatment.
-Or, there could be artificially constructed libraries fitting this description,
-e.g. technical replicates where the only difference across libraries 
-is the concentration of a few spiked-in genes.
-``Overshrinking'' of a few large log fold changes
-can be assessed by running \Rfunction{results} with \Rcode{addMLE=TRUE},
-which will print a results table with columns for the shrunken and
-unshrunken (MLE) log fold changes.
-The two estimates can be visually compared by running \Rfunction{plotMA} with
-\Rcode{MLE=TRUE} and \Rcode{MLE=FALSE}. 
-If ``overshrinking'' very large log fold changes is a concern,
-it is better to turn off the log fold change prior by
-running \Rfunction{DESeq} with \Rcode{betaPrior=FALSE}.
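-
-A sketch of this assessment, assuming \Rfunction{DESeq} was run with
-the log fold change prior (\Rcode{betaPrior=TRUE}):
-
-<<assessShrink, eval=FALSE>>=
-res <- results(dds, addMLE=TRUE)
-plotMA(res, MLE=TRUE)   # unshrunken (MLE) estimates
-plotMA(res, MLE=FALSE)  # shrunken estimates
-## if overshrinking is a concern, refit without the prior
-dds <- DESeq(dds, betaPrior=FALSE)
-@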
-
-Even more detail: how do we avoid overshrinking on typical datasets?
-The answer is that we estimate the width of the log fold change prior in a
-robust way to accommodate the very largest log fold changes, and so to
-avoid overshrinking. 
-The details of the prior estimation are described in the manual page for
-\Rfunction{nbinomWaldTest}. Briefly, a weighted upper quantile
-is used to match the width of the log fold change prior to the upper
-5\% of the MLE log fold changes, weighting by the expected sampling
-variability of the estimated log fold changes given the mean count for
-each gene. Note that this does not equal an assumption that 5\% of genes are
-differentially expressed, but that a reasonable width of a log fold
-change distribution can be obtained from the upper 5\% of MLE log fold
-changes. 
-
-\subsection{I ran a likelihood ratio test, but \texttt{results()} only gives me one comparison.}
-
-``\dots How do I get the $p$ values for all of the variables/levels 
-that were removed in the reduced design?''
-
-This is explained in the help page for \texttt{?results} in the
-section about likelihood ratio test p-values, but we will restate the
-answer here. When one performs a likelihood ratio test, the $p$ values and
-the test statistic (the \Robject{stat} column) are values for the test
-that removes all of the variables which are present in the full
-design and not in the reduced design. This tests the null hypothesis
-that all the coefficients from these variables and levels of these factors
-are equal to zero.
-
-The likelihood ratio test $p$ values therefore
-represent a test of \textit{all the variables and all the levels of factors}
-which are among these variables. However, the results table only has space for
-one column of log fold change, so a single variable and a single
-comparison are shown (among the potentially multiple log fold changes
-which were tested in the likelihood ratio test). 
-This is indicated at the top of the results table
-with the text, e.g.: ``log2 fold change (MLE): condition C vs A'' followed
-by ``LRT p-value: '\lowtilde{} batch + condition' vs '\lowtilde{} batch' ''.
-This indicates that the $p$ value is for the likelihood ratio test of
-\textit{all the variables and all the levels}, while the log fold change is a single
-comparison from among those variables and levels.
-See the help page for \Rfunction{results} for more details.
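-
-A minimal sketch of the likelihood ratio test setup described above:
-
-<<lrtSketch, eval=FALSE>>=
-## the p values test all coefficients dropped from full to reduced
-dds <- DESeq(dds, test="LRT", full=~batch+condition, reduced=~batch)
-res <- results(dds)
-@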
-
-\subsection{What are the exact steps performed by \Rfunction{DESeq()}?}
-
-See the manual page for \Rfunction{DESeq}, which links to the 
-subfunctions which are called in order, where complete details are listed.
-
-\subsection{Is there an official Galaxy tool for DESeq2?}
-
-Yes. The repository for the \deseqtwo{} tool is
-\url{https://github.com/galaxyproject/tools-iuc/tree/master/tools/deseq2} 
-and a link to its location in the Tool Shed is 
-\url{https://toolshed.g2.bx.psu.edu/view/iuc/deseq2/d983d19fbbab}.
-
-\subsection{I want to benchmark DESeq2 comparing to other DE tools.}
-
-One aspect which can cause problems for comparison is that, by default,
-\deseqtwo{} outputs \Rcode{NA} values for adjusted $p$ values based on 
-independent filtering of genes which have low counts.
-This is a way for \deseqtwo{} to give extra
-information on why the adjusted $p$ value for such a gene is not small.
-Additionally, $p$ values can be set to \Rcode{NA} based on extreme 
-count outlier detection (see Section~\ref{sec:moreInfo} for full details). 
-These \Rcode{NA} values should be considered
-negatives for purposes of estimating sensitivity and specificity. The
-easiest way to work with the adjusted $p$ values in a benchmarking
-context is probably to convert these \Rcode{NA} values to 1:
-
-<<convertNA, eval=FALSE>>=
-res$padj <- ifelse(is.na(res$padj), 1, res$padj)
-@ 
-
-\section{Acknowledgments}
-
-We have benefited in the development of \deseqtwo{} from the help and
-feedback of many individuals, including but not limited to: 
-The Bioconductor Core Team,
-Alejandro Reyes, Andrzej Ole\'s, Aleksandra Pekowska, Felix Klein,
-Nikolaos Ignatiadis,
-Vince Carey,
-Owen Solberg,
-Ruping Sun,
-Devon Ryan, 
-Steve Lianoglou, Jessica Larson, Christina Chaivorapol, Pan Du, Richard Bourgon,
-Willem Talloen, 
-Elin Videvall, Hanneke van Deutekom,
-Todd Burwell, 
-Jesse Rowley,
-Igor Dolgalev,
-Stephen Turner,
-Ryan C Thompson,
-Tyr Wiesner-Hanks,
-Konrad Rudolph,
-David Robinson,
-Mingxiang Teng,
-Mathias Lesche,
-Sonali Arora,
-Jordan Ramilowski,
-Ian Dworkin,
-Bj\"orn Gr\"uning,
-Ryan McMinds,
-Paul Gordon,
-Leonardo Collado Torres,
-Enrico Ferrero.
-
-\section{Session Info}
-
-<<sessInfo, results="asis", echo=FALSE>>=
-toLatex(sessionInfo())
-@
-
-<<resetOptions, results="hide", echo=FALSE>>=
-options(prompt="> ", continue="+ ")
-@ 
-
-\bibliography{library}
-
-\end{document}
diff --git a/vignettes/library.bib b/vignettes/library.bib
index 6d01a57..f0968d4 100644
--- a/vignettes/library.bib
+++ b/vignettes/library.bib
@@ -281,20 +281,22 @@ journal = {Bioinformatics}
   year = {2014}
 }
 
-@article{Patro2015Salmon,
-  author = {Patro, Rob and Duggal, Geet and Kingsford, Carl},
+@article{Patro2016Salmon,
+  author = {Patro, Rob and Duggal, Geet and Love, Michael I. and Irizarry, Rafael A. and Kingsford, Carl},
   journal = {bioRxiv},
-  title = {Salmon: Accurate, Versatile and Ultrafast Quantification from RNA-seq Data using Lightweight-Alignment},
-  url = {http://biorxiv.org/content/early/2015/06/27/021592},
-  year = 2015
+  title = {Salmon provides accurate, fast, and bias-aware transcript expression estimates using dual-phase inference},
+  url = {http://biorxiv.org/content/early/2016/08/30/021592},
+  year = 2016
 }
 
-@article{Bray2015Near,
+@article{Bray2016Near,
   author = {Bray, Nicolas and Pimentel, Harold and Melsted, Pall and Pachter, Lior},
-  journal = {arXiv},
-  title = {Near-optimal RNA-Seq quantification},
-  url = {http://arxiv.org/abs/1505.02710},
-  year = 2015
+  journal = {Nature Biotechnology},
+  pages = {525--527},
+  title = {Near-optimal probabilistic RNA-seq quantification},
+  volume = {34},
+  url = {http://dx.doi.org/10.1038/nbt.3519},
+  year = 2016
 }
 
 @article{Robert2015Errors,
diff --git a/vignettes/sed_call b/vignettes/sed_call
new file mode 100644
index 0000000..34ca70f
--- /dev/null
+++ b/vignettes/sed_call
@@ -0,0 +1 @@
+sed -e 's/rmarkdown::html_document:/BiocStyle::pdf_document2:/' -e '/highlight: pygments/d' -e 's/Analyzing RNA-seq data with DESeq2/Analyzing RNA-seq data with DESeq2 (PDF)/g' -e 's/This is the source document/This is a derived document, DO NOT EDIT/' -e '/DESeq2 package version: `r packageVersion("DESeq2")`/d' DESeq2.Rmd > DESeq2_pdf.Rmd
