[med-svn] [python-skbio] 01/04: Imported Upstream version 0.5.0

Kevin Murray daube-guest at moszumanska.debian.org
Tue Jun 28 04:26:19 UTC 2016


This is an automated email from the git hooks/post-receive script.

daube-guest pushed a commit to branch master
in repository python-skbio.

commit 2cca4ca103ce81a361ad301b4fe8aba7012f52a7
Author: Kevin Murray <spam at kdmurray.id.au>
Date:   Tue Jun 28 13:53:51 2016 +1000

    Imported Upstream version 0.5.0
---
 .travis.yml                                        |   1 -
 CHANGELOG.md                                       |  54 ++
 CONTRIBUTING.md                                    |  11 +-
 README.rst                                         |  37 +-
 RELEASE.md                                         |  16 +-
 asv.conf.json                                      |   5 +-
 benchmarks/benchmarks.py                           |   3 +-
 checklist.py                                       |  37 +-
 ci/conda_requirements.txt                          |   2 -
 ci/pip_requirements.txt                            |   2 -
 doc/README.md                                      |   2 +-
 doc/source/conf.py                                 |  18 +-
 doc/source/development/coding_guidelines.rst       |  17 +-
 doc/source/development/new_module.rst              |   4 -
 doc/source/development/py3.rst                     | 366 --------
 doc/source/index.rst                               |   3 +-
 licenses/python.txt                                | 270 ------
 setup.py                                           |  35 +-
 skbio/__init__.py                                  |   5 +-
 skbio/_base.py                                     | 803 +----------------
 skbio/alignment/__init__.py                        |   2 -
 skbio/alignment/_indexing.py                       |  10 +-
 skbio/alignment/_lib/__init__.py                   |   2 -
 skbio/alignment/_pairwise.py                       |  44 +-
 skbio/alignment/_repr.py                           |   3 +-
 skbio/alignment/_tabular_msa.py                    | 400 ++++++---
 skbio/alignment/tests/__init__.py                  |   2 -
 skbio/alignment/tests/test_pairwise.py             | 173 ++--
 skbio/alignment/tests/test_ssw.py                  |  11 +-
 skbio/alignment/tests/test_tabular_msa.py          | 997 +++++++++++++--------
 skbio/diversity/__init__.py                        |  15 +-
 skbio/diversity/_driver.py                         | 109 ++-
 skbio/diversity/_util.py                           |  10 +-
 skbio/diversity/alpha/__init__.py                  |   2 -
 skbio/diversity/alpha/_ace.py                      |   2 -
 skbio/diversity/alpha/_base.py                     |   2 -
 skbio/diversity/alpha/_chao1.py                    |   2 -
 skbio/diversity/alpha/_faith_pd.py                 |   8 +-
 skbio/diversity/alpha/_gini.py                     |   2 -
 skbio/diversity/alpha/_lladser.py                  |   2 -
 skbio/diversity/alpha/tests/__init__.py            |   2 -
 skbio/diversity/alpha/tests/test_ace.py            |   2 -
 skbio/diversity/alpha/tests/test_base.py           |  12 +-
 skbio/diversity/alpha/tests/test_chao1.py          |   2 -
 skbio/diversity/alpha/tests/test_faith_pd.py       |  48 +-
 skbio/diversity/alpha/tests/test_gini.py           |   2 -
 skbio/diversity/alpha/tests/test_lladser.py        |   2 -
 skbio/diversity/beta/__init__.py                   |   2 -
 skbio/diversity/beta/_unifrac.py                   |  24 +-
 skbio/diversity/beta/tests/__init__.py             |   2 -
 skbio/diversity/beta/tests/test_unifrac.py         |  50 +-
 skbio/diversity/tests/__init__.py                  |   2 -
 skbio/diversity/tests/test_driver.py               | 249 +++--
 skbio/diversity/tests/test_util.py                 |  61 +-
 skbio/io/__init__.py                               |   2 -
 skbio/io/_exception.py                             |   2 -
 skbio/io/_fileobject.py                            |  94 +-
 skbio/io/_iosources.py                             |  57 +-
 skbio/io/_warning.py                               |   2 -
 skbio/io/format/__init__.py                        |   2 -
 skbio/io/format/_base.py                           |   4 -
 skbio/io/format/_blast.py                          |   3 -
 skbio/io/format/blast6.py                          |  19 +-
 skbio/io/format/blast7.py                          |  27 +-
 skbio/io/format/clustal.py                         |   3 -
 skbio/io/format/emptyfile.py                       |   3 -
 skbio/io/format/fasta.py                           |  15 +-
 skbio/io/format/fastq.py                           |  16 +-
 skbio/io/format/genbank.py                         |   8 +-
 skbio/io/format/lsmat.py                           |   3 -
 skbio/io/format/newick.py                          |   5 -
 skbio/io/format/ordination.py                      |   6 +-
 skbio/io/format/phylip.py                          |   5 +-
 skbio/io/format/qseq.py                            |   5 -
 skbio/io/format/stockholm.py                       | 134 ++-
 skbio/io/format/tests/__init__.py                  |   2 -
 .../format/tests/data/fastq_single_seq_illumina1.8 |   4 +
 .../format/tests/data/stockholm_different_padding  |   8 +
 .../tests/data/stockholm_missing_reference_items   |   5 +
 .../io/format/tests/data/stockholm_missing_rn_tag  |   3 +
 .../tests/data/stockholm_multi_line_tree_no_id     |   4 +
 .../tests/data/stockholm_multi_line_tree_with_id   |   5 +
 .../tests/data/stockholm_multiple_multi_line_trees |   8 +
 .../tests/data/stockholm_multiple_references       |  20 +
 ...m_runon_gf => stockholm_runon_gf_no_whitespace} |   2 +-
 ...runon_gf => stockholm_runon_gf_with_whitespace} |   0
 skbio/io/format/tests/data/stockholm_runon_gs      |   5 -
 .../tests/data/stockholm_runon_gs_no_whitespace    |   5 +
 .../tests/data/stockholm_runon_gs_with_whitespace  |   5 +
 .../format/tests/data/stockholm_runon_references   |  10 +
 .../tests/data/stockholm_runon_references_mixed    |  10 +
 .../format/tests/data/stockholm_single_reference   |   8 +
 skbio/io/format/tests/test_base.py                 |  24 +-
 skbio/io/format/tests/test_blast6.py               |  20 +-
 skbio/io/format/tests/test_blast7.py               |  44 +-
 skbio/io/format/tests/test_clustal.py              |  72 +-
 skbio/io/format/tests/test_emptyfile.py            |   8 +-
 skbio/io/format/tests/test_fasta.py                |  48 +-
 skbio/io/format/tests/test_fastq.py                |  27 +-
 skbio/io/format/tests/test_genbank.py              |  16 +-
 skbio/io/format/tests/test_lsmat.py                |  41 +-
 skbio/io/format/tests/test_newick.py               |  22 +-
 skbio/io/format/tests/test_ordination.py           |   9 +-
 skbio/io/format/tests/test_phylip.py               |  12 +-
 skbio/io/format/tests/test_qseq.py                 |   4 -
 skbio/io/format/tests/test_stockholm.py            | 377 ++++++--
 skbio/io/registry.py                               |  46 +-
 skbio/io/tests/__init__.py                         |   2 -
 skbio/io/tests/test_iosources.py                   |   2 -
 skbio/io/tests/test_registry.py                    | 164 ++--
 skbio/io/tests/test_util.py                        |  97 +-
 skbio/io/util.py                                   |  57 +-
 skbio/{io/format => metadata}/__init__.py          |  12 +-
 skbio/metadata/_mixin.py                           | 408 +++++++++
 .../{util/_metadata_repr.py => metadata/_repr.py}  |  31 +-
 skbio/{util => metadata}/_testing.py               | 565 +++---------
 .../alpha => metadata}/tests/__init__.py           |   2 -
 skbio/metadata/tests/test_mixin.py                 |  80 ++
 skbio/sequence/__init__.py                         |  76 +-
 skbio/sequence/_dna.py                             |  83 +-
 skbio/sequence/_genetic_code.py                    |  67 +-
 skbio/sequence/_grammared_sequence.py              | 221 +++--
 skbio/sequence/_nucleotide_mixin.py                |  30 +-
 skbio/sequence/_protein.py                         |  22 +-
 skbio/sequence/_repr.py                            |   4 +-
 skbio/sequence/_rna.py                             |  83 +-
 skbio/sequence/_sequence.py                        | 235 +++--
 skbio/sequence/distance.py                         |  87 +-
 skbio/sequence/tests/__init__.py                   |   2 -
 skbio/sequence/tests/test_distance.py              | 126 ++-
 skbio/sequence/tests/test_dna.py                   |   5 +-
 skbio/sequence/tests/test_genetic_code.py          |  72 +-
 skbio/sequence/tests/test_grammared_sequence.py    | 160 ++--
 skbio/sequence/tests/test_nucleotide_sequences.py  |  24 +-
 skbio/sequence/tests/test_protein.py               |  12 +-
 skbio/sequence/tests/test_rna.py                   |   5 +-
 skbio/sequence/tests/test_sequence.py              | 406 ++++-----
 skbio/stats/__init__.py                            |   2 -
 skbio/stats/__subsample.c                          | 122 +--
 skbio/stats/__subsample.pyx                        |   2 -
 skbio/stats/_misc.py                               |   2 -
 skbio/stats/_subsample.py                          |  30 +-
 skbio/stats/composition.py                         | 175 +++-
 skbio/stats/distance/__init__.py                   |   2 -
 skbio/stats/distance/_anosim.py                    |   2 -
 skbio/stats/distance/_base.py                      |  15 +-
 skbio/stats/distance/_bioenv.py                    |   2 -
 skbio/stats/distance/_mantel.py                    |   8 +-
 skbio/stats/distance/_permanova.py                 |   3 -
 skbio/stats/distance/tests/__init__.py             |   2 -
 skbio/stats/distance/tests/test_anosim.py          |   8 +-
 skbio/stats/distance/tests/test_base.py            |  19 +-
 skbio/stats/distance/tests/test_bioenv.py          |   1 -
 skbio/stats/distance/tests/test_mantel.py          |   7 +-
 skbio/stats/distance/tests/test_permanova.py       |   8 +-
 skbio/stats/evolve/__init__.py                     |   2 -
 skbio/stats/evolve/_hommola.py                     |   3 -
 skbio/stats/evolve/tests/__init__.py               |   2 -
 skbio/stats/evolve/tests/test_hommola.py           |   1 -
 skbio/stats/gradient.py                            |  10 +-
 skbio/stats/ordination/__init__.py                 |  14 +-
 .../_canonical_correspondence_analysis.py          |  22 +-
 skbio/stats/ordination/_correspondence_analysis.py |  14 +-
 .../ordination/_ordination_results.py}             | 450 +---------
 .../ordination/_principal_coordinate_analysis.py   |  23 +-
 skbio/stats/ordination/_redundancy_analysis.py     |  25 +-
 skbio/stats/ordination/_utils.py                   |   2 -
 skbio/stats/ordination/tests/__init__.py           |   2 -
 .../test_canonical_correspondence_analysis.py      |  31 +-
 .../tests/test_correspondence_analysis.py          |   2 -
 .../ordination/tests/test_ordination_results.py}   | 160 +---
 .../tests/test_principal_coordinate_analysis.py    |   2 -
 .../ordination/tests/test_redundancy_analysis.py   |  43 +-
 skbio/stats/ordination/tests/test_util.py          |   3 -
 skbio/stats/power.py                               |  11 +-
 skbio/stats/tests/__init__.py                      |   2 -
 skbio/stats/tests/test_composition.py              | 387 ++++++--
 skbio/stats/tests/test_gradient.py                 |  37 +-
 skbio/stats/tests/test_misc.py                     |   1 -
 skbio/stats/tests/test_power.py                    |   2 -
 skbio/stats/tests/test_subsample.py                |  55 +-
 skbio/test.py                                      |   2 -
 skbio/tests/__init__.py                            |   2 -
 skbio/tests/test_base.py                           | 348 +------
 skbio/tests/test_workflow.py                       |   3 -
 skbio/tree/__init__.py                             |   2 -
 skbio/tree/_exception.py                           |   2 -
 skbio/tree/_majority_rule.py                       |  34 +-
 skbio/tree/_nj.py                                  |   5 +-
 skbio/tree/_tree.py                                |  51 +-
 skbio/tree/tests/__init__.py                       |   2 -
 skbio/tree/tests/test_majority_rule.py             |  77 +-
 skbio/tree/tests/test_nj.py                        |  13 +-
 skbio/tree/tests/test_tree.py                      | 246 ++---
 skbio/util/__init__.py                             |   2 -
 skbio/util/_decorator.py                           |   3 +-
 skbio/util/_exception.py                           |   2 -
 skbio/util/_misc.py                                |  49 +-
 skbio/util/_testing.py                             | 859 +-----------------
 skbio/util/_warning.py                             |   2 -
 skbio/util/tests/__init__.py                       |   2 -
 skbio/util/tests/test_decorator.py                 |  59 +-
 skbio/util/tests/test_misc.py                      |  22 +-
 skbio/util/tests/test_testing.py                   |   2 -
 skbio/workflow.py                                  |  22 +-
 205 files changed, 5219 insertions(+), 6686 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index bc23772..bf23db8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,6 @@ language: python
 env:
   - PYTHON_VERSION=3.5 MAKE_DOC=TRUE
   - PYTHON_VERSION=3.4 USE_CYTHON=TRUE
-  - PYTHON_VERSION=2.7
 before_install:
   - "export DISPLAY=:99.0"
   - "sh -e /etc/init.d/xvfb start"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a9c6317..a9f74af 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,59 @@
 # scikit-bio changelog
 
+## Version 0.5.0 (2016-06-14)
+
+**IMPORTANT**: scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.4 and later.
+
+### Features
+* Added more descriptive error message to `skbio.io.registry` when attempting to read without specifying `into` and when there is no generator reader. ([#1326](https://github.com/biocore/scikit-bio/issues/1326))
+* Added support for reference tags to `skbio.io.format.stockholm` reader and writer. ([#1348](https://github.com/biocore/scikit-bio/issues/1348))
+* Expanded error message in `skbio.io.format.stockholm` reader when `constructor` is not passed, in order to provide better explanation to user. ([#1327](https://github.com/biocore/scikit-bio/issues/1327))
+* Added `skbio.sequence.distance.kmer_distance` for computing the kmer distance between two sequences. ([#913](https://github.com/biocore/scikit-bio/issues/913))
+* Added `skbio.sequence.Sequence.replace` for assigning a character to positions in a `Sequence`. ([#1222](https://github.com/biocore/scikit-bio/issues/1222))
+* Added support for `pandas.RangeIndex`, lowering the memory footprint of default integer index objects. `Sequence.positional_metadata` and `TabularMSA.positional_metadata` now use `pd.RangeIndex` as the positional metadata index. `TabularMSA` now uses `pd.RangeIndex` as the default index. Usage of `pd.RangeIndex` over the previous `pd.Int64Index` [should be transparent](http://pandas.pydata.org/pandas-docs/version/0.18.0/whatsnew.html#range-index), so these changes should be non-breakin [...]
+* Added `reset_index=False` parameter to `TabularMSA.append` and `TabularMSA.extend` for resetting the MSA's index to the default index after appending/extending.
+* Added support for partial pairwise calculations via `skbio.diversity.partial_beta_diversity`. ([#1221](https://github.com/biocore/scikit-bio/issues/1221), [#1337](https://github.com/biocore/scikit-bio/pull/1337)). This function is immediately deprecated as its return type will change in the future and should be used with caution in its present form (see the function's documentation for details).
+* `TemporaryFile` and `NamedTemporaryFile` are now supported IO sources for `skbio.io` and related functionality.  ([#1291](https://github.com/biocore/scikit-bio/issues/1291))
+* Added `tree_node_class=TreeNode` parameter to `skbio.tree.majority_rule` to support returning consensus trees of type `TreeNode` (the default) or a type that has the same interface as `TreeNode` (e.g. `TreeNode` subclasses) ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
+* `TreeNode.from_linkage_matrix` and `TreeNode.from_taxonomy` now support constructing `TreeNode` subclasses. `TreeNode.bifurcate` now supports `TreeNode` subclasses ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
+* The `ignore_metadata` keyword has been added to `TablueMSA.iter_positions` to improve performance when metadata is not necessary.
+* Pairwise aligners in `skbio.alignment` now propagate per-sequence `metadata` objects (this does not include `positional_metadata`).
+
+### Backward-incompatible changes [stable]
+
+### Backward-incompatible changes [experimental]
+* `TabularMSA.append` and `TabularMSA.extend` now require one of `minter`, `index`, or `reset_index` to be provided when incorporating new sequences into an MSA. Previous behavior was to auto-increment the index labels if `minter` and `index` weren't provided and the MSA had a default integer index, otherwise error. Use `reset_index=True` to obtain the previous behavior in a more explicit way.
+* `skbio.stats.composition.ancom` now returns two `pd.DataFrame` objects, where it previously returned one. The first contains the ANCOM test results, as before, and the second contains percentile abundances of each feature in each group. The specific percentiles that are computed and returned is controlled by the new `percentiles` parameter to `skbio.stats.composition.ancom`. In the future, this second `pd.DataFrame` will not be returned by this function, but will be available through t [...]
+* `skbio.stats.composition.ancom` now performs multiple comparisons correction by default. The previous behavior of not performing multiple comparisons correction can be achieved by passing ``multiple_comparisons_correction=None``.
+* The ``reject`` column in the first ``pd.DataFrame`` returned from `skbio.stats.composition.ancom` has been renamed ``Reject null hypothesis`` for clarity. ([#1375](https://github.com/biocore/scikit-bio/issues/1375))
+
+### Bug fixes
+* Fixed row and column names to `biplot_scores` in the `OrdinationResults` object from `skbio.stats.ordination`. This fix affects the `cca` and `rda` methods. ([#1322](https://github.com/biocore/scikit-bio/issues/1322))
+* Fixed bug when using `skbio.io.format.stockholm` reader on file with multi-line tree with no id. Previously this raised an `AttributeError`, now it correctly handles this type of tree. ([#1334](https://github.com/biocore/scikit-bio/issues/1334))
+* Fixed bug when reading Stockholm files with GF or GS features split over multiple lines. Previously, the feature text was simply concatenated because it was assumed to have trailing whitespace. There are examples of Stockholm files with and without trailing whitespace for multi-line features, so the `skbio.io.format.stockholm` reader now adds a single space when concatenating feature text without trailing whitespace to avoid joining words together. Multi-line trees stored as GF metadat [...]
+* Fixed bug when using `Sequence.iter_kmers` on empty `Sequence` object. Previously this raised a `ValueError`, now it returns
+an empty generator.
+* Fixed minor bug where adding sequences to an empty `TabularMSA` with MSA-wide `positional_metadata` would result in a `TabularMSA` object in an inconsistent state. This could happen using `TabularMSA.append` or `TabularMSA.extend`. This bug only affects a `TabularMSA` object *without* sequences that has MSA-wide `positional_metadata` (for example, `TabularMSA([], positional_metadata={'column': []})`).
+* `TreeNode.distance` now handles the situation in which `self` or `other` are ancestors. Previously, a node further up the tree was used resulting in inflated distances. ([#807](https://github.com/biocore/scikit-bio/issues/807))
+* `TreeNode.prune` can now handle a root with a single descendent. Previously, the root was ignored from possibly having a single descendent. ([#1247](https://github.com/biocore/scikit-bio/issues/1247))
+* Providing the `format` keyword to `skbio.io.read` when creating a generator with an empty file will now return an empty generator instead of raising `StopIteration`. ([#1313](https://github.com/biocore/scikit-bio/issues/1313))
+* `OrdinationResults` is now importable from `skbio` and `skbio.stats.ordination` and correctly linked from the documentation ([#1205](https://github.com/biocore/scikit-bio/issues/1205))
+* Fixed performance bug in pairwise aligners resulting in 100x worse performance than in 0.2.4.
+
+### Deprecated functionality [stable]
+* Deprecated use of the term "non-degenerate", in favor of "definite". `GrammaredSequence.nondegenerate_chars`, `GrammaredSequence.nondegenerates`, and `GrammaredSequence.has_nondegenerates` have been renamed to `GrammaredSequence.definite_chars`, `GrammaredSequence.definites`, and `GrammaredSequence.has_definites`, respectively. The old names will be removed in scikit-bio 0.5.2. Relevant affected public classes include `GrammaredSequence`, `DNA`, `RNA`, and `Protein`.
+
+### Deprecated functionality [experimental]
+* Deprecated function `skbio.util.create_dir`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
+functionality described [here](https://docs.python.org/2/library/os.html#os.makedirs). ([#833](https://github.com/biocore/scikit-bio/issues/833))
+* Deprecated function `skbio.util.remove_files`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
+functionality described [here](https://docs.python.org/2/library/os.html#os.remove). ([#833](https://github.com/biocore/scikit-bio/issues/833))
+* Deprecated function `skbio.util.is_casava_v180_or_later`. This function will be removed in 0.5.1. Functionality moved to FASTQ sniffer.
+([#833](https://github.com/biocore/scikit-bio/issues/833))
+
+### Miscellaneous
+* When installing scikit-bio via `pip`, numpy must now be installed first ([#1296](https://github.com/biocore/scikit-bio/issues/1296))
+
 ## Version 0.4.2 (2016-02-17)
 
 Minor maintenance release. **This is the last Python 2.7 compatible release. Future scikit-bio releases will only support Python 3.**
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f167f1d..349b031 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,8 +55,6 @@ Submitting code to scikit-bio
 
 scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pull Request](https://help.github.com/articles/using-pull-requests) mechanism for reviewing and accepting submissions. You should work through the following steps to submit code to scikit-bio.
 
-**Note:** We recommend developing scikit-bio in a Python 3 environment because doctests must be written (and pass) in Python 3. See [Setting up a development environment](#setting-up-a-development-environment).
-
 1. Begin by [creating an issue](https://github.com/biocore/scikit-bio/issues) describing your proposed change (see [Types of contributions](#types-of-contributions) for details).
 
 2. [Fork](https://help.github.com/articles/fork-a-repo) the scikit-bio repository on the GitHub website.
@@ -109,11 +107,13 @@ scikit-bio is hosted on [GitHub](http://www.github.com), and we use GitHub's [Pu
 Setting up a development environment
 ------------------------------------
 
+**Note:** scikit-bio must be developed in a Python 3.4 or later environment.
+
 The recommended way to set up a development environment for contributing to scikit-bio is using [Anaconda](https://store.continuum.io/cshop/anaconda/) by Continuum Analytics, with its associated command line utility `conda`. The primary benefit of `conda` over `pip` is that on some operating systems (ie Linux), `pip` installs packages from source. This can take a very long time to install Numpy, scipy, matplotlib, etc. `conda` installs these packages using pre-built binaries, so the inst [...]
 
 1. Install Anaconda
 
- See [Continuum's site](https://store.continuum.io/cshop/anaconda/) for instructions. [Miniconda](http://conda.pydata.org/docs/install/quick.html) provides a great fast way to get conda up and running.
+ See [Continuum's site](https://store.continuum.io/cshop/anaconda/) for instructions. [Miniconda](http://conda.pydata.org/docs/install/quick.html) provides a fast way to get conda up and running.
 
 2. Create a new conda environment
  ```
@@ -169,14 +169,13 @@ Testing guidelines
 
 All code that is added to scikit-bio must be unit tested, and the unit test code must be submitted in the same pull request as the library code that you are submitting. We will only merge code that is unit tested and that passes the [continuous integration build](https://github.com/biocore/scikit-bio/blob/master/.travis.yml). This build includes, but is not limited to, the following checks:
 
-- Full unit test suite executes without errors in Python 2 and 3.
-- Doctests execute correctly in Python 3.
+- Full unit test suite and doctests execute without errors in supported versions of Python 3.
 - C code can be correctly compiled.
 - Cython code is correctly generated.
 - All tests import functionality from the appropriate minimally deep API.
 - Documentation can be built.
 - Current code coverage is maintained or improved.
-- Code passes ``pep8``/``flake8`` checks.
+- Code passes ``flake8`` checks.
 
 Running ``make test`` locally during development will include a subset of the full checks performed by Travis-CI.
 
diff --git a/README.rst b/README.rst
index 08a8897..e7635bf 100644
--- a/README.rst
+++ b/README.rst
@@ -3,32 +3,41 @@
    :target: http://scikit-bio.org
    :alt: scikit-bio logo
 
-|Build Status| |Coverage Status| |Gitter Badge|
+|Build Status| |Coverage Status| |ASV Benchmarks| |Gitter Badge| |Depsy Badge| |Anaconda Cloud Build| |Anaconda Cloud| |License| |Downloads| |Install|
 
-scikit-bio is an open-source, BSD-licensed Python package providing data structures, algorithms and educational resources for bioinformatics.
+scikit-bio is an open-source, BSD-licensed Python 3 package providing data structures, algorithms and educational resources for bioinformatics.
 
 To view scikit-bio's documentation, visit `scikit-bio.org
 <http://scikit-bio.org>`__.
 
+**Note:** scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.4 and later.
+
 scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/blob/master/doc/source/user/api_stability.rst>`_ for more details, including [...]
 
 Installing
 ----------
 
-To install the latest release of scikit-bio::
+The recommended way to install scikit-bio is via the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_.
 
-    pip install scikit-bio
-
-Equivalently, you can use the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_ to install scikit-bio and its dependencies without having to compile them::
+To install the latest release of scikit-bio::
 
     conda install -c https://conda.anaconda.org/biocore scikit-bio
 
-Finally, most of scikit-bio's dependencies (in particular, the ones that are trickier to build) are also available, albeit only for Python 2, in `Canopy Express <https://www.enthought.com/canopy-express/>`_.
+Alternatively, you can install scikit-bio using ``pip``::
+
+    pip install numpy
+    pip install scikit-bio
 
 You can verify your installation by running the scikit-bio unit tests::
 
     python -m skbio.test
 
+For users of Debian, ``skbio`` is in the Debian software distribution and may
+be installed using::
+
+    sudo apt-get install python3-skbio python-skbio-doc
+
+
 Getting help
 ------------
 
@@ -102,6 +111,20 @@ scikit-bio's logo was created by `Alina Prassas <http://cargocollective.com/alin
    :target: https://travis-ci.org/biocore/scikit-bio
 .. |Coverage Status| image:: https://coveralls.io/repos/biocore/scikit-bio/badge.png
    :target: https://coveralls.io/r/biocore/scikit-bio
+.. |ASV Benchmarks| image:: http://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat
+   :target: https://s3-us-west-2.amazonaws.com/scikit-bio.org/benchmarks/master/index.html
 .. |Gitter Badge| image:: https://badges.gitter.im/Join%20Chat.svg
    :alt: Join the chat at https://gitter.im/biocore/scikit-bio
    :target: https://gitter.im/biocore/scikit-bio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+.. |Depsy Badge| image:: http://depsy.org/api/package/pypi/scikit-bio/badge.svg
+   :target: http://depsy.org/package/python/scikit-bio
+.. |Anaconda Cloud Build| image:: https://anaconda.org/biocore/scikit-bio/badges/build.svg
+   :target: https://anaconda.org/biocore/scikit-bio/builds
+.. |Anaconda Cloud| image:: https://anaconda.org/biocore/scikit-bio/badges/version.svg
+   :target: https://anaconda.org/biocore/scikit-bio
+.. |License| image:: https://anaconda.org/biocore/scikit-bio/badges/license.svg
+   :target: https://anaconda.org/biocore/scikit-bio
+.. |Downloads| image:: https://anaconda.org/biocore/scikit-bio/badges/downloads.svg
+   :target: https://anaconda.org/biocore/scikit-bio
+.. |Install| image:: https://anaconda.org/biocore/scikit-bio/badges/installer/conda.svg
+   :target: https://conda.anaconda.org/biocore
diff --git a/RELEASE.md b/RELEASE.md
index dc9a9f5..e3fd1ea 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -2,9 +2,7 @@
 
 ## Introduction
 
-Releasing a piece of software can simultaneously be an invigorating, intimidating, horrifying, and cathartic experience. This guide aims to make the release process as smooth as possible.
-
-To illustrate examples of commands you might run, let's assume that the current version is 1.2.3-dev and we want to release version 1.2.4. Our versioning system is based on Semantic Versioning, which you can read about at http://semver.org.
+This guide explains how to release a new version of scikit-bio. To illustrate examples of commands you might run, let's assume that the current version is 1.2.3-dev and we want to release version 1.2.4. Our versioning system is based on Semantic Versioning, which you can read about at http://semver.org.
 
 **Note:** The following commands assume you are in the top-level directory of the scikit-bio repository unless otherwise noted. They also assume that you have [virtualenv](http://virtualenv.readthedocs.org/en/latest/#)/[virtualenvwrapper](http://virtualenvwrapper.readthedocs.org/en/latest/) installed.
 
@@ -30,7 +28,7 @@ In the meantime, you can build the documentation and update the website.
 
 1. Build the documentation locally:
 
-        make -C doc clean && make -C doc html
+        make -C doc clean html
 
 2. Switch to the ``gh-pages`` branch of the repository.
 
@@ -60,7 +58,7 @@ If the tests passed on Travis (see step 4 of **Prep the release (part 1)** above
 
 ## Tag the release
 
-From the [scikit-bio GitHub page](https://github.com/biocore/scikit-bio), click on the releases tab and draft a new release. Use the version number for the tag name (1.2.4) and create the tag against master. Fill in a release title that is consistent with previous release titles and add a summary of the release (linking to ``CHANGELOG.md`` is a good idea). This release summary will be the primary information that we point users to when we announce the release. This is (at least experimen [...]
+From the [scikit-bio GitHub page](https://github.com/biocore/scikit-bio), click on the releases tab and draft a new release. Use the version number for the tag name (1.2.4) and create the tag against master. Fill in a release title that is consistent with previous release titles and add a summary of the release (linking to ``CHANGELOG.md`` is a good idea). This release summary will be the primary information that we point users to when we announce the release.
 
 Once the release is created on GitHub, it's a good idea to test out the release tarball before publishing to PyPI:
 
@@ -80,8 +78,6 @@ Once the release is created on GitHub, it's a good idea to test out the release
 
 Assuming the GitHub release tarball correctly installs and passes its tests, you're now ready to test the creation of the source distribution (``sdist``) that will be published to PyPI. It is important to test the source distribution because it is created in an entirely different way than the release tarball on GitHub. Thus, there is the danger of having two different release tarballs: the one created on GitHub and the one uploaded to PyPI.
 
-**Important:** Check ``MANIFEST.in`` to ensure that the files and directories it references still exist. Some may have been removed, renamed, or there may be new files/dirs that need to be included in the ``sdist`` release. This step in the release process has caused the most hangups; don't neglect ``MANIFEST.in``!
-
 1. Download the release tarball from GitHub, extract it, and ``cd`` into the top-level directory.
 
 2. Build a source distribution:
@@ -117,10 +113,10 @@ Assuming the GitHub release tarball correctly installs and passes its tests, you
     Due to its C extensions, releasing scikit-bio packages for different platforms will require you to perform the following steps on each of those platforms. For example, an ``osx-64`` package will need to be built on OS X, and a ``linux-64`` package will need to be built on 64-bit Linux. These steps will be the same on all platforms, so you should repeat them for every platform you want to release for.
 
         conda skeleton pypi scikit-bio
-        conda build scikit-bio --python 2.7
+        conda build scikit-bio --python 3.4
         conda build scikit-bio --python 3.5
 
-    At this stage you have built Python 2.7 and 3.5 packages. The absolute path to the packages will be provided as output from each ``conda build`` commands. You should now create conda environments for each, and run the tests as described above. You can install these local packages as follows:
+    At this stage you have built Python 3.4 and 3.5 packages. The absolute path to the packages will be provided as output from each ``conda build`` command. You should now create conda environments for each, and run the tests as described above. You can install these local packages as follows:
 
         conda install --use-local scikit-bio
 
@@ -128,7 +124,7 @@ Assuming the GitHub release tarball correctly installs and passes its tests, you
 
         anaconda upload -u biocore <package-filepath>
 
-    ``<package-filepath>`` should be replaced with the path to the package that was was created above. Repeat this for each package you created (here, the Python 2.7 and 3.5 packages).
+    ``<package-filepath>`` should be replaced with the path to the package that was created above. Repeat this for each package you created (here, the Python 3.4 and 3.5 packages).
 
     After uploading, you should create new environments for every package you uploaded, install scikit-bio from each package, and re-run the tests. You can install the packages you uploaded as follows:
 
diff --git a/asv.conf.json b/asv.conf.json
index d219cbd..fa38050 100644
--- a/asv.conf.json
+++ b/asv.conf.json
@@ -11,9 +11,8 @@
   "project": "scikit-bio",
   "project_url": "http://scikit-bio.org/",
   "pythons": [
-    "3.4",
-    "3.3",
-    "2.7"
+    "3.5",
+    "3.4"
   ],
   "version": 1
 }
diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py
index da5f2bd..4d10b77 100644
--- a/benchmarks/benchmarks.py
+++ b/benchmarks/benchmarks.py
@@ -61,4 +61,5 @@ class BenchmarkSuite:
         dna_seq.gc_content()
 
     def time_search_for_motif_in_gapped(self):
-        dna_seq.find_with_regex(motif_1_regex, ignore=dna_seq.gaps())
+        consume_iterator(
+            dna_seq.find_with_regex(motif_1_regex, ignore=dna_seq.gaps()))
diff --git a/checklist.py b/checklist.py
index d5856c2..3daa4cd 100755
--- a/checklist.py
+++ b/checklist.py
@@ -8,8 +8,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import collections
 import os
 import os.path
@@ -20,6 +18,10 @@ import tokenize
 
 import dateutil.parser
 
+if sys.version_info.major != 3:
+    sys.exit("scikit-bio can only be used with Python 3. You are currently "
+             "running Python %d." % sys.version_info.major)
+
 
 def main():
     """Go on a power trip by nitpicking the scikit-bio repo.
@@ -39,7 +41,7 @@ def main():
     root = 'skbio'
     validators = [InitValidator(), CopyrightHeadersValidator(),
                   ExecPermissionValidator(), GeneratedCythonValidator(),
-                  APIRegressionValidator(), FluxCapacitorValidator()]
+                  APIRegressionValidator()]
 
     return_code = 0
     for validator in validators:
@@ -53,7 +55,7 @@ def main():
     return return_code
 
 
-class RepoValidator(object):
+class RepoValidator:
     """Abstract base class representing a repository validator.
 
     Subclasses must override and implement ``_validate`` (see its docstring for
@@ -450,32 +452,5 @@ class APIRegressionValidator(RepoValidator):
         return skbio_imports
 
 
-class FluxCapacitorValidator(RepoValidator):
-    """Ensure that the __future__ statements are fluxing correctly"""
-    reason = ("These files do not have the following import at the start:\n\n"
-              "from __future__ import absolute_import, division,"
-              " print_function\n")
-
-    def _validate(self, root, dirs, files):
-        failures = []
-        expected = {"absolute_import", "division", "print_function"}
-        for file in files:
-            if file.endswith(".py"):
-                filename = os.path.join(root, file)
-                failures.append(filename)
-                with open(filename) as f:
-                    source = ast.parse(f.read())
-                    for node, _ in zip(ast.iter_child_nodes(source), range(2)):
-                        if isinstance(node, ast.Expr):
-                            continue
-                        if isinstance(node, ast.ImportFrom):
-                            if node.module == "__future__":
-                                if expected.issubset(
-                                        {n.name for n in node.names}):
-                                    failures.pop()
-                            break
-        return failures
-
-
 if __name__ == '__main__':
     sys.exit(main())
diff --git a/ci/conda_requirements.txt b/ci/conda_requirements.txt
index 5cc2170..95213e7 100644
--- a/ci/conda_requirements.txt
+++ b/ci/conda_requirements.txt
@@ -6,8 +6,6 @@ pandas
 nose
 pep8
 ipython
-future
-six
 pyflakes
 flake8
 python-dateutil
diff --git a/ci/pip_requirements.txt b/ci/pip_requirements.txt
index d14cee3..82b1d65 100644
--- a/ci/pip_requirements.txt
+++ b/ci/pip_requirements.txt
@@ -1,6 +1,4 @@
 HTTPretty >= 0.8.14
-bz2file
-contextlib2
 coveralls
 natsort
 lockfile
diff --git a/doc/README.md b/doc/README.md
index ff3f3b4..6c9a594 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -135,7 +135,7 @@ After listing public module members, we encourage a usage example section
 showing how to use some of the module's functionality. Examples should be
 written in [doctest](http://docs.python.org/3/library/doctest.html) format so
 that they can be automatically tested (e.g., using ```make test``` or
-```python -m skbio.test```). Doctests should be written in Python 3.
+```python -m skbio.test```).
 
     Examples
     --------
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 14e826d..03e7774 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -7,6 +7,10 @@ import os
 import types
 import re
 
+if sys.version_info.major != 3:
+    raise RuntimeError("scikit-bio can only be used with Python 3. You are "
+                       "currently running Python %d." % sys.version_info.major)
+
 # Force matplotlib to not use any Xwindows backend.
 import matplotlib
 matplotlib.use('Agg')
@@ -58,7 +62,7 @@ class NewAuto(autosummary.Autosummary):
                 return specials[display_name], '', summary, real_name
             return display_name, sig, summary, real_name
 
-        skip = ['__nonzero__']
+        skip = []
 
         return [fix_item(*e) for e in super(NewAuto, self).get_items(names)
                 if e[0] not in skip]
@@ -417,9 +421,6 @@ plot_include_source = True
 plot_formats = [('png', 96), ]
 #plot_html_show_formats = False
 
-import math
-phi = (math.sqrt(5) + 1)/2
-
 font_size = 13*72/96.0  # 13 px
 
 plot_rcparams = {
@@ -429,12 +430,11 @@ plot_rcparams = {
     'xtick.labelsize': font_size,
     'ytick.labelsize': font_size,
     'legend.fontsize': font_size,
-    'figure.figsize': (3*phi, 3),
     'figure.subplot.bottom': 0.2,
     'figure.subplot.left': 0.2,
     'figure.subplot.right': 0.9,
-    'figure.subplot.top': 0.85,
-    'figure.subplot.wspace': 0.4,
+    'figure.subplot.top': 0.9,
+    'figure.subplot.wspace': 0.2,
     'text.usetex': False,
 
     # Some of our figures have legends outside the axes area. When they're
@@ -445,9 +445,7 @@ plot_rcparams = {
     'savefig.bbox': 'tight'
 }
 
-if not use_matplotlib_plot_directive:
-    import matplotlib
-    matplotlib.rcParams.update(plot_rcparams)
+matplotlib.rcParams.update(plot_rcparams)
 
 # -----------------------------------------------------------------------------
 # Intersphinx configuration
diff --git a/doc/source/development/coding_guidelines.rst b/doc/source/development/coding_guidelines.rst
index 2bafd17..9cf4f7c 100644
--- a/doc/source/development/coding_guidelines.rst
+++ b/doc/source/development/coding_guidelines.rst
@@ -188,6 +188,7 @@ The structure of your module should be similar to the example below. scikit-bio
        Numbers
 
     """
+
     # ----------------------------------------------------------------------------
     # Copyright (c) 2013--, scikit-bio development team.
     #
@@ -196,16 +197,18 @@ The structure of your module should be similar to the example below. scikit-bio
     # The full license is in the file COPYING.txt, distributed with this software.
     # ----------------------------------------------------------------------------
 
-    from __future__ import absolute_import, division, print_function
+    from random import choice, random
 
     import numpy as np
-    from random import choice, random
     from utils import indices
 
+
     class Numbers(list):
-        pass    # much code deleted
+        pass
+
+
     class FrequencyDistribution(dict):
-        pass    # much code deleted
+        pass
 
 
 How should I write comments?
@@ -309,8 +312,6 @@ Example of a ``nose`` test module structure
     # The full license is in the file COPYING.txt, distributed with this software.
     # ----------------------------------------------------------------------------
 
-    from __future__ import absolute_import, division, print_function
-
     import numpy as np
     from nose.tools import assert_almost_equal, assert_raises
 
@@ -367,8 +368,8 @@ After doing this you should see your name and e-mail when you run the following
     $ git config --global user.email
     yoshiki89 at gmail.com
 
-Writting a commit message
-^^^^^^^^^^^^^^^^^^^^^^^^^
+Writing a commit message
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 In general, the writing of a commit message should adhere to `NumPy's guidelines`_ which, if followed correctly, will help you structure your changes better, i.e. bug fixes will be in one commit, followed by a commit updating the test suite, and one last commit that updates the documentation as needed.
 
diff --git a/doc/source/development/new_module.rst b/doc/source/development/new_module.rst
index 297b987..d91a8b3 100644
--- a/doc/source/development/new_module.rst
+++ b/doc/source/development/new_module.rst
@@ -22,8 +22,6 @@ like this::
   # The full license is in the file COPYING.txt, distributed with this software.
   # ----------------------------------------------------------------------------
 
-  from __future__ import absolute_import, division, print_function
-
   from skbio.util import TestRunner
   test = TestRunner(__file__).test
 
@@ -45,7 +43,5 @@ necessary so that all tests can be run after installation)::
   # The full license is in the file COPYING.txt, distributed with this software.
   # ----------------------------------------------------------------------------
 
-  from __future__ import absolute_import, division, print_function
-
 Finally, remember to also follow the `documentation guidelines
 <https://github.com/biocore/scikit-bio/blob/master/doc/README.md#documenting-a-module-in-scikit-bio>`_.
diff --git a/doc/source/development/py3.rst b/doc/source/development/py3.rst
deleted file mode 100644
index 98fcb18..0000000
--- a/doc/source/development/py3.rst
+++ /dev/null
@@ -1,366 +0,0 @@
-Supporting Python 2 and Python 3
-################################
-
-skbio simultaneously supports Python 2.7 and 3.3+ by writing code that
-works unchanged in both major versions.
-
-As a compatibility layer, we're using the `future <http://python-future.org/>`_
-and `six <https://pypi.python.org/pypi/six>`_ projects. future "allows you to
-use a single, clean Python 3.x-compatible codebase to support both Python 2 and
-Python 3 with minimal overhead". It includes functionality from "six, IPython,
-Jinja2, Django, and Pandas". Recent versions of the future project stopped
-bundling the six library, so we also directly depend on six (e.g., for StringIO
-compatibility).
-
-So far, these notes are based on issues that have appeared when porting
-skbio, so it is not a complete guide. Refer to the `official porting
-guide <https://docs.python.org/3/howto/pyporting.html>`_ and the
-`python-future docs <http://python-future.org/>`_ for more
-information.
-
-Importing __future__
-====================
-
-For consistency across versions, every Python file should start with
-the following imports::
-
-  # ----------------------------------------------------------------------------
-  # Copyright (c) 2013--, scikit-bio development team.
-  #
-  # Distributed under the terms of the Modified BSD License.
-  #
-  # The full license is in the file COPYING.txt, distributed with this software.
-  # ----------------------------------------------------------------------------
-
-  from __future__ import absolute_import, division, print_function
-
-Iterators
-=========
-
-Builtins
---------
-
-Builtin iterators in Python 2 usually return lists, and have an
-alternative that returns an iterator (i.e., `range` and `xrange`,
-`items` and `iteritems`). In Python 3, only the iterator version
-exists but it uses the list-returning name (i.e., `range` and
-`items`).
-
-When iterating over the resulting object, the recommended approach
-depends on efficiency concerns:
-
-- If iteration only happens over a few items, you can use the
-  functions that exist both in Python 2 and Python 3.
-
-- If the number of iterations can be large and efficiency is
-  important, use the future package.
-
-+--------------------+----------------------------+--------------------+
-|Small # of          |Efficient versions          |Notes               |
-|iterations (returns |(always iterators)          |                    |
-|lists in py2,       |                            |                    |
-|iterators in py3)   |                            |                    |
-+--------------------+----------------------------+--------------------+
-|`zip`               |`future.builtins.zip`       |                    |
-+--------------------+----------------------------+--------------------+
-|`range`             |`future.builtins.range`     |                    |
-+--------------------+----------------------------+--------------------+
-|`map`               |`future.builtins.map`       |Prefer lists        |
-|                    |                            |comprehensions or   |
-|                    |                            |for loops in        |
-|                    |                            |general. Avoid      |
-|                    |                            |calling functions   |
-|                    |                            |that cause side     |
-|                    |                            |effects when using  |
-|                    |                            |map. Gotcha: Py3's  |
-|                    |                            |`map` stops when the|
-|                    |                            |shortest iterable is|
-|                    |                            |exhausted, but Py2's|
-|                    |                            |pads them with      |
-|                    |                            |`None` till the     |
-|                    |                            |longest iterable is |
-|                    |                            |exhausted.          |
-|                    |                            |                    |
-+--------------------+----------------------------+--------------------+
-|`filter`            |`future.builtins.filter`    |                    |
-|                    |                            |                    |
-|                    |                            |                    |
-+--------------------+----------------------------+--------------------+
-|`functools.reduce`  |`functools.reduce`          |Avoid using the     |
-|                    |                            |global reduce       |
-|                    |                            |available in Py2 (it|
-|                    |                            |is the same as the  |
-|                    |                            |`functools` one)    |
-+--------------------+----------------------------+--------------------+
-|`d.items()`         |`future.utils.viewitems(d)` |Efficient iteration |
-|                    |                            |over d *and*        |
-|                    |                            |set-like behaviour  |
-+--------------------+----------------------------+--------------------+
-|`d.values()`        |`future.utils.viewvalues(d)`|Efficient iteration |
-|                    |                            |over d *and*        |
-|                    |                            |set-like behaviour  |
-+--------------------+----------------------------+--------------------+
-|`d.keys()`          |`future.utils.viewkeys(d)`  |Hardly ever needed, |
-|                    |                            |as iterating over a |
-|                    |                            |dictionary yields   |
-|                    |                            |keys (thus sorted(d)|
-|                    |                            |returns the sorted  |
-|                    |                            |keys).              |
-+--------------------+----------------------------+--------------------+
-
-
-When not directly iterating over an iterator, don't write code that
-relies on list-like behaviour: you may need to cast it explicitly. The
-following snippets show some possible issues::
-
-    a = zip(...)
-    b = zip(...)
-    c = a + b  # succeeds in Py2 (list concatenation), TypeError in Py3
-
-::
-
-    s = map(int, range(2))
-    1 in s  # True (membership testing in a list is an O(n) bad idea)
-    0 in s  # True in Py2, False in Py3
-
-In Py2, `s` is a list, so clearly `(1 in [0, 1]) == True` and `(0 in
-[0, 1]) == True`. In Py3, `s` is an iterator and the items it yields
-are discarded. Let's see an example with a generator to try and make
-it more clear::
-
-    >>> s = ((i, print(i)) for i in [0, 1, 2])  # print will let us see the iteration
-    >>> (1, None) in s  # Starts iterating over s...
-    0
-    1                   # ...till it finds (1, None)
-    True
-    >>> (0, None) in s  # Continues iterating over s
-    2                   # s is exhausted
-    False               # but (0, None) isn't there
-
-
-Advancing an iterator
----------------------
-
-Always use the next function, which is available from Python 2.6
-onwards. Never call the next method, which doesn't exist in Py3.
-
-Implementing new iterators
---------------------------
-
-Implement the `__next__` special method, like in Py3, and decorate the
-class::
-
-    from future.utils import implements_iterator
-
-    @implements_iterator
-    class ParameterIterBase(object):
-    def __next__(self):
-        return next(self._generator)
-
-It is also possible to subclass from `future.builtins.object`. In this
-case, no decorator is needed.
-
-Changes in the standard library
-===============================
-
-To deal with modules that live under a different place, future
-provides a context manager::
-
-    # Example from future's documentation
-    from future import standard_library
-
-    with standard_library.hooks():
-        from http.client import HttpConnection
-        from itertools import filterfalse
-        import html.parser
-        import queue
-
-StringIO and BytesIO
---------------------
-
-In Py2 there are three flavours of StringIO: a pure Python module
-(StringIO), an accelerated version (cStringIO), and another one in the
-io module. They all behave in a slightly different way, with different
-memory and performance characteristics. So far, we're using::
-
-    from six import StringIO
-
-It refers to `io.StringIO` in Py3, and `StringIO.StringIO` in Py2.
-
-If you need a binary file-like object (see the Text vs bytes section),
-use `six.BytesIO`, which refers to `io.BytesIO` in Py3, and `StringIO.StringIO`
-in Py2.
-
-Text vs bytes
-=============
-
-This is a fundamental change between Py2 and Py3. It is very important
-to always distinguish text from bytes.
-
-String literals that are to be treated as bytes need the `b`
-prefix. String literals that are text need either the `u` prefix or
-`from __future__ import unicode_literals` at the top.
-
-A brief introduction: Unicode, UTF-8, ASCII...
-----------------------------------------------
-
-A string can be seen as a sequence of characters. According to the
-Unicode standard, each character is represented by a code point (a
-number). For example, character `ñ` is represented by the Unicode code
-point `U+00F1`. Code points are still abstract and can be stored in a
-number of ways, including even little or big endian formats. There are
-many encodings that map code points to byte values (encode) and back
-(decode). Three important ones are ASCII, UTF-8 and latin-1:
-
-- ASCII is a 7 bit encoding that can handle a very limited range of
-  Unicode code points (not even the one corresponding to character
-  `ñ`).
-
-- UTF-8 is an encoding that can represent every Unicode character. It
-  is ASCII-compatible because code points that can also be represented
-  by ASCII are mapped to the same byte value by UTF-8 and ASCII. `ñ`
-  is represented by the byte sequence `\xC3\xB1`.
-
-- latin-1 is an ASCII-compatible 8 bit encoding that maps the first
-  256 Unicode code points to their byte values. That is, the Unicode
-  code point `U+00F1` (character `ñ`) is directly encoded as `0xF1` in
-  latin-1. The Py2 `str` type loosely worked by assuming everything
-  was encoded in latin-1.
-
-
-Text processing
----------------
-
-    There Ain't No Such Thing As Plain Text.  -- Joel Spolsky, `The
-    Absolute Minimum Every Software Developer Absolutely, Positively
-    Must Know About Unicode and Character Sets (No Excuses!)
-    <http://www.joelonsoftware.com/articles/Unicode.html>`_, 2003.
-
-After going through Nick Coghlan's `"Processing Text Files in Python
-3"
-<https://ncoghlan_devs-python-notes.readthedocs.org/en/latest/python3/text_file_processing.html>`_
-I think the way forward is to process ASCII-like files (fasta, fastq)
-as binary files, and decode to strings some parts, if necessary. This
-is faster than processing them as text files, especially in Py3. In
-fact, it seems (from functions like `_phred_to_ascii*`) that these
-formats are in fact mixed binary and ASCII, which I think puts us in
-the same place as people dealing with `network protocols
-<https://ncoghlan_devs-python-notes.readthedocs.org/en/latest/python3/binary_protocols.html>`_:
-it's more cumbersome to do in Py3, especially before Python 3.5
-arrives, which will `reintroduce binary string interpolation
-<http://legacy.python.org/dev/peps/pep-0460/>`_).
-
-Gotchas
--------
-
-Comparing bytes and text strings always returns `False` in Python 3
-(as they're incompatible types, and comparisons are required to
-succeed by the language)::
-
-    >>> b'GATCAT' == 'GATCAT'
-    False
-
-Calling `str` on a bytes instance returns a string with the `b` prefix
-and quotes, which will give unexpected results when using string
-formatting::
-
-    >>> "Sequence {}".format(b'GATCAT')
-    "Sequence b'GATCAT'"
-
-If you actually want to construct a text string, bytes objects need to
-be *decoded* into text. For example::
-
-    >>> "Sequence {}".format(b'GATCAT'.decode('utf-8'))
-
-If you want to efficiently construct a byte string, the most
-convenient way may be to call `b''.join(iterable of byte strings)`,
-though there are other options like using `io.BytesIO` or
-`bytearray`. For a very small number of byte strings, it may be OK to
-use the `+` operator.
-
-Run python with the `-b` flag to detect these two bug-prone usages,
-and `-bb` to turn them into exceptions.
-
-Instance checking: basestring, str, unicode, bytes, long, int
-=============================================================
-
-Strings
--------
-
-When testing if a variable is a string use
-`six.string_types`. It refers to `basestring` in Py2 and `str` in Py3.
-`binary_type` and `text_type` are also available.
-
-Numbers
--------
-
-The `long` type no longer exists in Py3. To test if a number is an
-integer (`int` or `long` in Py2, `int` in Py3), compare it to
-the abstract base class `Integral`::
-
-    from numbers import Integral
-    isinstance(quality, Integral)
-
-Implementing comparisons
-========================
-
-If the class you're defining has a `total ordering
-<http://en.wikipedia.org/wiki/Total_order>`_, either use
-`functools.total_ordering
-<https://docs.python.org/2.7/library/functools.html#functools.total_ordering>`_
-or implement all rich comparison methods if comparison performance is
-a bottleneck. Don't implement `__cmp__`, which was removed in Py3.
-
-However, usually only equality is important and you should only define
-`__eq__`. While compatibility with Py2 is kept, `__ne__` needs to be
-implemented too::
-
-    def __ne__(self, other):
-        """Required in Py2."""
-        return not self == other
-
-Otherwise, using the operator `!=` will lead to unexpected results in
-Py2 because it will compare identity, not equality::
-
-    class Foo(object):
-        def __eq__(self, other):
-            return True
-
-    print(Foo() != Foo())
-
-That prints `True` in Py2 (because each instance has a different `id`)
-but prints `False` in Py3 (the opposite of what `__eq__` returns,
-which is the desired behaviour).
-
-Always test that both `==` and `!=` are behaving correctly, e.g.::
-
-    def test_eq(self):
-        gc_1 = GeneticCode(self.sgc)
-        gc_2 = GeneticCode(self.sgc)
-        self.assertEqual(gc_1, gc_2)
-
-    def test_ne(self):
-        gc_1 = GeneticCode(self.sgc)
-        gc_2 = GeneticCode(self.sgc)
-        # Explicitly using !=
-        self.assertFalse(gc_1 != gc_2)
-
-Other modules
-=============
-
-Numpy
------
-
-Try to avoid setting dtypes to a string (i.e., use `dtype=np.float64`
-instead of `dtype='float'`, etc). It is may be safe, but some warnings
-were raised when running Python with the `-b` flag. Also, field names
-in structured dtypes need to be bytes (`str` type) in Py2, but text
-(`str` type) in Py3 (`issue #2407
-<https://github.com/numpy/numpy/issues/2407>`_).
-
-Testing
-=======
-
-`unittest.assertEquals` is deprecated. Use `unittest.assertEqual`
-instead. The complete list of deprecated testing methods is `here
-<https://docs.python.org/3.4/library/unittest.html#deprecated-aliases>`_
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 57bd1fb..f076669 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -2,7 +2,7 @@ scikit-bio
 ==========
 
 scikit-bio (canonically pronounced *sigh-kit-buy-oh*) is a library for working
-with biological data in Python. scikit-bio is open source, BSD-licensed
+with biological data in Python 3. scikit-bio is open source, BSD-licensed
 software that is currently under active development.
 
 API Reference
@@ -39,6 +39,5 @@ to scikit-bio.
 .. toctree::
    :maxdepth: 1
 
-   development/py3
    development/coding_guidelines
    development/new_module
diff --git a/licenses/python.txt b/licenses/python.txt
deleted file mode 100644
index 832d272..0000000
--- a/licenses/python.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-A. HISTORY OF THE SOFTWARE
-==========================
-
-Python was created in the early 1990s by Guido van Rossum at Stichting
-Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
-as a successor of a language called ABC.  Guido remains Python's
-principal author, although it includes many contributions from others.
-
-In 1995, Guido continued his work on Python at the Corporation for
-National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
-in Reston, Virginia where he released several versions of the
-software.
-
-In May 2000, Guido and the Python core development team moved to
-BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
-year, the PythonLabs team moved to Digital Creations (now Zope
-Corporation, see http://www.zope.com).  In 2001, the Python Software
-Foundation (PSF, see http://www.python.org/psf/) was formed, a
-non-profit organization created specifically to own Python-related
-Intellectual Property.  Zope Corporation is a sponsoring member of
-the PSF.
-
-All Python releases are Open Source (see http://www.opensource.org for
-the Open Source Definition).  Historically, most, but not all, Python
-releases have also been GPL-compatible; the table below summarizes
-the various releases.
-
-    Release         Derived     Year        Owner       GPL-
-                    from                                compatible? (1)
-
-    0.9.0 thru 1.2              1991-1995   CWI         yes
-    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
-    1.6             1.5.2       2000        CNRI        no
-    2.0             1.6         2000        BeOpen.com  no
-    1.6.1           1.6         2001        CNRI        yes (2)
-    2.1             2.0+1.6.1   2001        PSF         no
-    2.0.1           2.0+1.6.1   2001        PSF         yes
-    2.1.1           2.1+2.0.1   2001        PSF         yes
-    2.2             2.1.1       2001        PSF         yes
-    2.1.2           2.1.1       2002        PSF         yes
-    2.1.3           2.1.2       2002        PSF         yes
-    2.2.1           2.2         2002        PSF         yes
-    2.2.2           2.2.1       2002        PSF         yes
-    2.2.3           2.2.2       2003        PSF         yes
-    2.3             2.2.2       2002-2003   PSF         yes
-    2.3.1           2.3         2002-2003   PSF         yes
-    2.3.2           2.3.1       2002-2003   PSF         yes
-    2.3.3           2.3.2       2002-2003   PSF         yes
-    2.3.4           2.3.3       2004        PSF         yes
-    2.3.5           2.3.4       2005        PSF         yes
-    2.4             2.3         2004        PSF         yes
-    2.4.1           2.4         2005        PSF         yes
-    2.4.2           2.4.1       2005        PSF         yes
-    2.4.3           2.4.2       2006        PSF         yes
-    2.5             2.4         2006        PSF         yes
-    2.7             2.6         2010        PSF         yes
-
-Footnotes:
-
-(1) GPL-compatible doesn't mean that we're distributing Python under
-    the GPL.  All Python licenses, unlike the GPL, let you distribute
-    a modified version without making your changes open source.  The
-    GPL-compatible licenses make it possible to combine Python with
-    other software that is released under the GPL; the others don't.
-
-(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
-    because its license has a choice of law clause.  According to
-    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
-    is "not incompatible" with the GPL.
-
-Thanks to the many outside volunteers who have worked under Guido's
-direction to make these releases possible.
-
-
-B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
-===============================================================
-
-PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
---------------------------------------------
-
-1. This LICENSE AGREEMENT is between the Python Software Foundation
-("PSF"), and the Individual or Organization ("Licensee") accessing and
-otherwise using this software ("Python") in source or binary form and
-its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, PSF
-hereby grants Licensee a nonexclusive, royalty-free, world-wide
-license to reproduce, analyze, test, perform and/or display publicly,
-prepare derivative works, distribute, and otherwise use Python
-alone or in any derivative version, provided, however, that PSF's
-License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
-2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
-Reserved" are retained in Python alone or in any derivative version
-prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on
-or incorporates Python or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python.
-
-4. PSF is making Python available to Licensee on an "AS IS"
-basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any
-relationship of agency, partnership, or joint venture between PSF and
-Licensee.  This License Agreement does not grant permission to use PSF
-trademarks or trade name in a trademark sense to endorse or promote
-products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using Python, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
-
-
-BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
--------------------------------------------
-
-BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
-
-1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
-office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
-Individual or Organization ("Licensee") accessing and otherwise using
-this software in source or binary form and its associated
-documentation ("the Software").
-
-2. Subject to the terms and conditions of this BeOpen Python License
-Agreement, BeOpen hereby grants Licensee a non-exclusive,
-royalty-free, world-wide license to reproduce, analyze, test, perform
-and/or display publicly, prepare derivative works, distribute, and
-otherwise use the Software alone or in any derivative version,
-provided, however, that the BeOpen Python License is retained in the
-Software, alone or in any derivative version prepared by Licensee.
-
-3. BeOpen is making the Software available to Licensee on an "AS IS"
-basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
-SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
-AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
-DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-5. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-6. This License Agreement shall be governed by and interpreted in all
-respects by the law of the State of California, excluding conflict of
-law provisions.  Nothing in this License Agreement shall be deemed to
-create any relationship of agency, partnership, or joint venture
-between BeOpen and Licensee.  This License Agreement does not grant
-permission to use BeOpen trademarks or trade names in a trademark
-sense to endorse or promote products or services of Licensee, or any
-third party.  As an exception, the "BeOpen Python" logos available at
-http://www.pythonlabs.com/logos.html may be used according to the
-permissions granted on that web page.
-
-7. By copying, installing or otherwise using the software, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
-
-
-CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
----------------------------------------
-
-1. This LICENSE AGREEMENT is between the Corporation for National
-Research Initiatives, having an office at 1895 Preston White Drive,
-Reston, VA 20191 ("CNRI"), and the Individual or Organization
-("Licensee") accessing and otherwise using Python 1.6.1 software in
-source or binary form and its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, CNRI
-hereby grants Licensee a nonexclusive, royalty-free, world-wide
-license to reproduce, analyze, test, perform and/or display publicly,
-prepare derivative works, distribute, and otherwise use Python 1.6.1
-alone or in any derivative version, provided, however, that CNRI's
-License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
-1995-2001 Corporation for National Research Initiatives; All Rights
-Reserved" are retained in Python 1.6.1 alone or in any derivative
-version prepared by Licensee.  Alternately, in lieu of CNRI's License
-Agreement, Licensee may substitute the following text (omitting the
-quotes): "Python 1.6.1 is made available subject to the terms and
-conditions in CNRI's License Agreement.  This Agreement together with
-Python 1.6.1 may be located on the Internet using the following
-unique, persistent identifier (known as a handle): 1895.22/1013.  This
-Agreement may also be obtained from a proxy server on the Internet
-using the following URL: http://hdl.handle.net/1895.22/1013".
-
-3. In the event Licensee prepares a derivative work that is based on
-or incorporates Python 1.6.1 or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python 1.6.1.
-
-4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
-basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. This License Agreement shall be governed by the federal
-intellectual property law of the United States, including without
-limitation the federal copyright law, and, to the extent such
-U.S. federal law does not apply, by the law of the Commonwealth of
-Virginia, excluding Virginia's conflict of law provisions.
-Notwithstanding the foregoing, with regard to derivative works based
-on Python 1.6.1 that incorporate non-separable material that was
-previously distributed under the GNU General Public License (GPL), the
-law of the Commonwealth of Virginia shall govern this License
-Agreement only as to issues arising under or with respect to
-Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
-License Agreement shall be deemed to create any relationship of
-agency, partnership, or joint venture between CNRI and Licensee.  This
-License Agreement does not grant permission to use CNRI trademarks or
-trade name in a trademark sense to endorse or promote products or
-services of Licensee, or any third party.
-
-8. By clicking on the "ACCEPT" button where indicated, or by copying,
-installing or otherwise using Python 1.6.1, Licensee agrees to be
-bound by the terms and conditions of this License Agreement.
-
-        ACCEPT
-
-
-CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
---------------------------------------------------
-
-Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
-The Netherlands.  All rights reserved.
-
-Permission to use, copy, modify, and distribute this software and its
-documentation for any purpose and without fee is hereby granted,
-provided that the above copyright notice appear in all copies and that
-both that copyright notice and this permission notice appear in
-supporting documentation, and that the name of Stichting Mathematisch
-Centrum or CWI not be used in advertising or publicity pertaining to
-distribution of the software without specific, written prior
-permission.
-
-STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
-THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
-FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/setup.py b/setup.py
index 539697f..e2c5aa6 100644
--- a/setup.py
+++ b/setup.py
@@ -12,21 +12,17 @@ import os
 import platform
 import re
 import ast
+import sys
+
 from setuptools import find_packages, setup
 from setuptools.extension import Extension
-from setuptools.command.build_ext import build_ext as _build_ext
+
+import numpy as np
 
 
-# Bootstrap setup.py with numpy
-# Huge thanks to coldfix's solution
-# http://stackoverflow.com/a/21621689/579416
-class build_ext(_build_ext):
-    def finalize_options(self):
-        _build_ext.finalize_options(self)
-        # Prevent numpy from thinking it is still in its setup process:
-        __builtins__.__NUMPY_SETUP__ = False
-        import numpy
-        self.include_dirs.append(numpy.get_include())
+if sys.version_info.major != 3:
+    sys.exit("scikit-bio can only be used with Python 3. You are currently "
+             "running Python %d." % sys.version_info.major)
 
 # version parsing from __init__ pulled from Flask's setup.py
 # https://github.com/mitsuhiko/flask/blob/master/setup.py
@@ -42,12 +38,10 @@ classes = """
     Topic :: Software Development :: Libraries
     Topic :: Scientific/Engineering
     Topic :: Scientific/Engineering :: Bio-Informatics
-    Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.3
+    Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: 3.4
+    Programming Language :: Python :: 3.5
     Operating System :: Unix
     Operating System :: POSIX
     Operating System :: MacOS :: MacOS X
@@ -104,22 +98,17 @@ setup(name='scikit-bio',
       url='http://scikit-bio.org',
       packages=find_packages(),
       ext_modules=extensions,
-      cmdclass={'build_ext': build_ext},
-      setup_requires=['numpy >= 1.9.2'],
+      include_dirs=[np.get_include()],
       install_requires=[
-          'bz2file >= 0.98',
-          'lockfile >= 0.10.2',
+          'lockfile >= 0.10.2',  # req'd for our usage of CacheControl
           'CacheControl >= 0.11.5',
-          'contextlib2 >= 0.4.0',
           'decorator >= 3.4.2',
-          'future >= 0.14.3',
           'IPython >= 3.2.0',
           'matplotlib >= 1.4.3',
           'natsort >= 4.0.3',
           'numpy >= 1.9.2',
-          'pandas >= 0.17.0',
+          'pandas >= 0.18.0',
           'scipy >= 0.15.1',
-          'six >= 1.9.0',
           'nose >= 1.3.7'
       ],
       classifiers=classifiers,
diff --git a/skbio/__init__.py b/skbio/__init__.py
index ebc5fe2..b2e7a78 100644
--- a/skbio/__init__.py
+++ b/skbio/__init__.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 
 from skbio.util import TestRunner
 
@@ -18,7 +17,7 @@ from skbio.stats.distance import DistanceMatrix
 from skbio.alignment import local_pairwise_align_ssw, TabularMSA
 from skbio.tree import TreeNode, nj
 from skbio.io import read, write
-from skbio._base import OrdinationResults
+from skbio.stats.ordination import OrdinationResults
 
 
 __all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
@@ -26,7 +25,7 @@ __all__ = ['Sequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
            'TreeNode', 'nj', 'read', 'write', 'OrdinationResults']
 
 __credits__ = "https://github.com/biocore/scikit-bio/graphs/contributors"
-__version__ = "0.4.2"
+__version__ = "0.5.0"
 
 mottos = [
     # 03/15/2014
diff --git a/skbio/_base.py b/skbio/_base.py
index aac8289..40e1307 100644
--- a/skbio/_base.py
+++ b/skbio/_base.py
@@ -6,27 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import with_metaclass
-from future.builtins import zip
-
 import abc
-import copy
-import functools
-
-import numpy as np
-import pandas as pd
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from mpl_toolkits.mplot3d import Axes3D  # noqa
-from IPython.core.pylabtools import print_figure
-from IPython.core.display import Image, SVG
-
-from skbio.stats._misc import _pprint_strs
-from skbio.util._decorator import stable, experimental
 
 
-class SkbioObject(with_metaclass(abc.ABCMeta, object)):
+class SkbioObject(metaclass=abc.ABCMeta):
     """Abstract base class defining core API common to all scikit-bio objects.
 
     Public scikit-bio classes should subclass this class to ensure a common,
@@ -36,790 +19,10 @@ class SkbioObject(with_metaclass(abc.ABCMeta, object)):
     """
     @abc.abstractmethod
     def __str__(self):
-        pass
-
-
-class MetadataMixin(with_metaclass(abc.ABCMeta, object)):
-    @property
-    @stable(as_of="0.4.0")
-    def metadata(self):
-        """``dict`` containing metadata which applies to the entire object.
-
-        Notes
-        -----
-        This property can be set and deleted. When setting new metadata a
-        shallow copy of the dictionary is made.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with metadata share a common interface for
-           accessing and manipulating their metadata. The following examples
-           use scikit-bio's ``Sequence`` class to demonstrate metadata
-           behavior. These examples apply to all other scikit-bio objects
-           storing metadata.
-
-        Create a sequence with metadata:
-
-        >>> from pprint import pprint
-        >>> from skbio import Sequence
-        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id',
-        ...                                  'description': 'seq description'})
-
-        Retrieve metadata:
-
-        >>> pprint(seq.metadata) # using pprint to display dict in sorted order
-        {'description': 'seq description', 'id': 'seq-id'}
-
-        Update metadata:
-
-        >>> seq.metadata['id'] = 'new-id'
-        >>> seq.metadata['pubmed'] = 12345
-        >>> pprint(seq.metadata)
-        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
-
-        Set metadata:
-
-        >>> seq.metadata = {'abc': 123}
-        >>> seq.metadata
-        {'abc': 123}
-
-        Delete metadata:
-
-        >>> seq.has_metadata()
-        True
-        >>> del seq.metadata
-        >>> seq.metadata
-        {}
-        >>> seq.has_metadata()
-        False
-
-        """
-        if self._metadata is None:
-            # Not using setter to avoid copy.
-            self._metadata = {}
-        return self._metadata
-
-    @metadata.setter
-    def metadata(self, metadata):
-        if not isinstance(metadata, dict):
-            raise TypeError("metadata must be a dict")
-        # Shallow copy.
-        self._metadata = metadata.copy()
-
-    @metadata.deleter
-    def metadata(self):
-        self._metadata = None
-
-    @abc.abstractmethod
-    def __init__(self, metadata=None):
-        pass
-
-    def _init_(self, metadata=None):
-        if metadata is None:
-            self._metadata = None
-        else:
-            self.metadata = metadata
-
-    @abc.abstractmethod
-    def __eq__(self, other):
-        pass
-
-    def _eq_(self, other):
-        # We're not simply comparing self.metadata to other.metadata in order
-        # to avoid creating "empty" metadata representations on the objects if
-        # they don't have metadata.
-        if self.has_metadata() and other.has_metadata():
-            if self.metadata != other.metadata:
-                return False
-        elif not (self.has_metadata() or other.has_metadata()):
-            # Both don't have metadata.
-            pass
-        else:
-            # One has metadata while the other does not.
-            return False
-
-        return True
-
-    @abc.abstractmethod
-    def __ne__(self, other):
-        pass
-
-    def _ne_(self, other):
-        return not (self == other)
-
-    @abc.abstractmethod
-    def __copy__(self):
-        pass
-
-    def _copy_(self):
-        if self.has_metadata():
-            return self.metadata.copy()
-        else:
-            return None
-
-    @abc.abstractmethod
-    def __deepcopy__(self, memo):
-        pass
-
-    def _deepcopy_(self, memo):
-        if self.has_metadata():
-            return copy.deepcopy(self.metadata, memo)
-        else:
-            return None
-
-    @stable(as_of="0.4.0")
-    def has_metadata(self):
-        """Determine if the object has metadata.
-
-        An object has metadata if its ``metadata`` dictionary is not empty
-        (i.e., has at least one key-value pair).
-
-        Returns
-        -------
-        bool
-            Indicates whether the object has metadata.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with metadata share a common interface for
-           accessing and manipulating their metadata. The following examples
-           use scikit-bio's ``Sequence`` class to demonstrate metadata
-           behavior. These examples apply to all other scikit-bio objects
-           storing metadata.
-
-        >>> from skbio import Sequence
-        >>> seq = Sequence('ACGT')
-        >>> seq.has_metadata()
-        False
-        >>> seq = Sequence('ACGT', metadata={})
-        >>> seq.has_metadata()
-        False
-        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
-        >>> seq.has_metadata()
-        True
-
-        """
-        return self._metadata is not None and bool(self.metadata)
-
-
-class PositionalMetadataMixin(with_metaclass(abc.ABCMeta, object)):
-    @abc.abstractmethod
-    def _positional_metadata_axis_len_(self):
-        """Return length of axis that positional metadata applies to.
-
-        Returns
-        -------
-        int
-            Positional metadata axis length.
-
-        """
-        pass
-
-    @property
-    @stable(as_of="0.4.0")
-    def positional_metadata(self):
-        """``pd.DataFrame`` containing metadata along an axis.
-
-        Notes
-        -----
-        This property can be set and deleted. When setting new positional
-        metadata a shallow copy is made.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with positional metadata share a common
-           interface for accessing and manipulating their positional metadata.
-           The following examples use scikit-bio's ``DNA`` class to demonstrate
-           positional metadata behavior. These examples apply to all other
-           scikit-bio objects storing positional metadata.
-
-        Create a DNA sequence with positional metadata:
-
-        >>> from skbio import DNA
-        >>> seq = DNA(
-        ...     'ACGT',
-        ...     positional_metadata={'quality': [3, 3, 20, 11],
-        ...                          'exons': [True, True, False, True]})
-        >>> seq
-        DNA
-        -----------------------------
-        Positional metadata:
-            'exons': <dtype: bool>
-            'quality': <dtype: int64>
-        Stats:
-            length: 4
-            has gaps: False
-            has degenerates: False
-            has non-degenerates: True
-            GC-content: 50.00%
-        -----------------------------
-        0 ACGT
-
-        Retrieve positional metadata:
-
-        >>> seq.positional_metadata
-           exons  quality
-        0   True        3
-        1   True        3
-        2  False       20
-        3   True       11
-
-        Update positional metadata:
-
-        >>> seq.positional_metadata['gaps'] = seq.gaps()
-        >>> seq.positional_metadata
-           exons  quality   gaps
-        0   True        3  False
-        1   True        3  False
-        2  False       20  False
-        3   True       11  False
-
-        Set positional metadata:
-
-        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
-        >>> seq.positional_metadata
-          degenerates
-        0       False
-        1       False
-        2       False
-        3       False
-
-        Delete positional metadata:
-
-        >>> seq.has_positional_metadata()
-        True
-        >>> del seq.positional_metadata
-        >>> seq.positional_metadata
-        Empty DataFrame
-        Columns: []
-        Index: [0, 1, 2, 3]
-        >>> seq.has_positional_metadata()
-        False
-
-        """
-        if self._positional_metadata is None:
-            # Not using setter to avoid copy.
-            self._positional_metadata = pd.DataFrame(
-                index=np.arange(self._positional_metadata_axis_len_()))
-        return self._positional_metadata
-
-    @positional_metadata.setter
-    def positional_metadata(self, positional_metadata):
-        try:
-            # Pass copy=True to copy underlying data buffer.
-            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
-        except pd.core.common.PandasError as e:
-            raise TypeError(
-                "Invalid positional metadata. Must be consumable by "
-                "`pd.DataFrame` constructor. Original pandas error message: "
-                "\"%s\"" % e)
-
-        num_rows = len(positional_metadata.index)
-        axis_len = self._positional_metadata_axis_len_()
-        if num_rows != axis_len:
-            raise ValueError(
-                "Number of positional metadata values (%d) must match the "
-                "positional metadata axis length (%d)."
-                % (num_rows, axis_len))
-
-        positional_metadata.reset_index(drop=True, inplace=True)
-        self._positional_metadata = positional_metadata
-
-    @positional_metadata.deleter
-    def positional_metadata(self):
-        self._positional_metadata = None
-
-    @abc.abstractmethod
-    def __init__(self, positional_metadata=None):
-        pass
-
-    def _init_(self, positional_metadata=None):
-        if positional_metadata is None:
-            self._positional_metadata = None
-        else:
-            self.positional_metadata = positional_metadata
-
-    @abc.abstractmethod
-    def __eq__(self, other):
-        pass
-
-    def _eq_(self, other):
-        # We're not simply comparing self.positional_metadata to
-        # other.positional_metadata in order to avoid creating "empty"
-        # positional metadata representations on the objects if they don't have
-        # positional metadata.
-        if self.has_positional_metadata() and other.has_positional_metadata():
-            if not self.positional_metadata.equals(other.positional_metadata):
-                return False
-        elif not (self.has_positional_metadata() or
-                  other.has_positional_metadata()):
-            # Both don't have positional metadata.
-            pass
-        else:
-            # One has positional metadata while the other does not.
-            return False
-
-        return True
-
-    @abc.abstractmethod
-    def __ne__(self, other):
-        pass
-
-    def _ne_(self, other):
-        return not (self == other)
-
-    @abc.abstractmethod
-    def __copy__(self):
-        pass
-
-    def _copy_(self):
-        if self.has_positional_metadata():
-            # deep=True makes a shallow copy of the underlying data buffer.
-            return self.positional_metadata.copy(deep=True)
-        else:
-            return None
-
-    @abc.abstractmethod
-    def __deepcopy__(self, memo):
-        pass
-
-    def _deepcopy_(self, memo):
-        if self.has_positional_metadata():
-            return copy.deepcopy(self.positional_metadata, memo)
-        else:
-            return None
-
-    @stable(as_of="0.4.0")
-    def has_positional_metadata(self):
-        """Determine if the object has positional metadata.
-
-        An object has positional metadata if its ``positional_metadata``
-        ``pd.DataFrame`` has at least one column.
-
-        Returns
-        -------
-        bool
-            Indicates whether the object has positional metadata.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with positional metadata share a common
-           interface for accessing and manipulating their positional metadata.
-           The following examples use scikit-bio's ``DNA`` class to demonstrate
-           positional metadata behavior. These examples apply to all other
-           scikit-bio objects storing positional metadata.
-
-        >>> import pandas as pd
-        >>> from skbio import DNA
-        >>> seq = DNA('ACGT')
-        >>> seq.has_positional_metadata()
-        False
-        >>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
-        >>> seq.has_positional_metadata()
-        False
-        >>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
-        >>> seq.has_positional_metadata()
-        True
-
-        """
-        return (self._positional_metadata is not None and
-                len(self.positional_metadata.columns) > 0)
-
-
-class OrdinationResults(SkbioObject):
-    """Store ordination results, providing serialization and plotting support.
-
-    Stores various components of ordination results. Provides methods for
-    serializing/deserializing results, as well as generation of basic
-    matplotlib 3-D scatterplots. Will automatically display PNG/SVG
-    representations of itself within the IPython Notebook.
-
-    Attributes
-    ----------
-    short_method_name : str
-        Abbreviated ordination method name.
-    long_method_name : str
-        Ordination method name.
-    eigvals : pd.Series
-        The resulting eigenvalues.  The index corresponds to the ordination
-        axis labels
-    samples : pd.DataFrame
-        The position of the samples in the ordination space, row-indexed by the
-        sample id.
-    features : pd.DataFrame
-        The position of the features in the ordination space, row-indexed by
-        the feature id.
-    biplot_scores : pd.DataFrame
-        Correlation coefficients of the samples with respect to the features.
-    sample_constraints : pd.DataFrame
-        Site constraints (linear combinations of constraining variables):
-        coordinates of the sites in the space of the explanatory variables X.
-        These are the fitted site scores
-    proportion_explained : pd.Series
-        Proportion explained by each of the dimensions in the ordination space.
-        The index corresponds to the ordination axis labels
-    png
-    svg
-
-    See Also
-    --------
-    ca
-    cca
-    pcoa
-    rda
-    """
-    default_write_format = 'ordination'
-
-    @experimental(as_of="0.4.0")
-    def __init__(self, short_method_name, long_method_name, eigvals,
-                 samples, features=None, biplot_scores=None,
-                 sample_constraints=None, proportion_explained=None):
-
-        self.short_method_name = short_method_name
-        self.long_method_name = long_method_name
-
-        self.eigvals = eigvals
-        self.samples = samples
-        self.features = features
-        self.biplot_scores = biplot_scores
-        self.sample_constraints = sample_constraints
-        self.proportion_explained = proportion_explained
-
-    @experimental(as_of="0.4.0")
-    def __str__(self):
-        """Return a string representation of the ordination results.
-
-        String representation lists ordination results attributes and indicates
-        whether or not they are present. If an attribute is present, its
-        dimensions are listed. A truncated list of features and sample IDs are
-        included (if they are present).
-
-        Returns
-        -------
-        str
-            String representation of the ordination results.
-
-        .. shownumpydoc
-
-        """
-        lines = ['Ordination results:']
-        method = '%s (%s)' % (self.long_method_name, self.short_method_name)
-        lines.append(self._format_attribute(method, 'Method', str))
-
-        attrs = [(self.eigvals, 'Eigvals'),
-                 (self.proportion_explained, 'Proportion explained'),
-                 (self.features, 'Features'),
-                 (self.samples, 'Samples'),
-                 (self.biplot_scores, 'Biplot Scores'),
-                 (self.sample_constraints, 'Sample constraints')]
-        for attr, attr_label in attrs:
-            def formatter(e):
-                return 'x'.join(['%d' % s for s in e.shape])
-
-            lines.append(self._format_attribute(attr, attr_label, formatter))
-
-        lines.append(self._format_attribute(
-            self.features, 'Feature IDs',
-            lambda e: _pprint_strs(e.index.tolist())))
-        lines.append(self._format_attribute(
-            self.samples, 'Sample IDs',
-            lambda e: _pprint_strs(e.index.tolist())))
-
-        return '\n'.join(lines)
-
-    @experimental(as_of="0.4.0")
-    def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
-             title='', cmap=None, s=20):
-        """Create a 3-D scatterplot of ordination results colored by metadata.
-
-        Creates a 3-D scatterplot of the ordination results, where each point
-        represents a sample. Optionally, these points can be colored by
-        metadata (see `df` and `column` below).
-
-        Parameters
-        ----------
-        df : pd.DataFrame, optional
-            ``DataFrame`` containing sample metadata. Must be indexed by sample
-            ID, and all sample IDs in the ordination results must exist in the
-            ``DataFrame``. If ``None``, samples (i.e., points) will not be
-            colored by metadata.
-        column : str, optional
-            Column name in `df` to color samples (i.e., points in the plot) by.
-            Cannot have missing data (i.e., ``np.nan``). `column` can be
-            numeric or categorical. If numeric, all values in the column will
-            be cast to ``float`` and mapped to colors using `cmap`. A colorbar
-            will be included to serve as a legend. If categorical (i.e., not
-            all values in `column` could be cast to ``float``), colors will be
-            chosen for each category using evenly-spaced points along `cmap`. A
-            legend will be included. If ``None``, samples (i.e., points) will
-            not be colored by metadata.
-        axes : iterable of int, optional
-            Indices of sample coordinates to plot on the x-, y-, and z-axes.
-            For example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
-            PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
-            Must contain exactly three elements.
-        axis_labels : iterable of str, optional
-            Labels for the x-, y-, and z-axes. If ``None``, labels will be the
-            values of `axes` cast as strings.
-        title : str, optional
-            Plot title.
-        cmap : str or matplotlib.colors.Colormap, optional
-            Name or instance of matplotlib colormap to use for mapping `column`
-            values to colors. If ``None``, defaults to the colormap specified
-            in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
-            are recommended for categorical data, while sequential colormaps
-            (e.g., ``Greys``) are recommended for numeric data. See [1]_ for
-            these colormap classifications.
-        s : scalar or iterable of scalars, optional
-            Size of points. See matplotlib's ``Axes3D.scatter`` documentation
-            for more details.
-
-        Returns
-        -------
-        matplotlib.figure.Figure
-            Figure containing the scatterplot and legend/colorbar if metadata
-            were provided.
-
-        Raises
-        ------
-        ValueError
-            Raised on invalid input, including the following situations:
-
-            - there are not at least three dimensions to plot
-            - there are not exactly three values in `axes`, they are not
-              unique, or are out of range
-            - there are not exactly three values in `axis_labels`
-            - either `df` or `column` is provided without the other
-            - `column` is not in the ``DataFrame``
-            - sample IDs in the ordination results are not in `df` or have
-              missing data in `column`
-
-        See Also
-        --------
-        mpl_toolkits.mplot3d.Axes3D.scatter
-
-        Notes
-        -----
-        This method creates basic plots of ordination results, and is intended
-        to provide a quick look at the results in the context of metadata
-        (e.g., from within the IPython Notebook). For more customization and to
-        generate publication-quality figures, we recommend EMPeror [2]_.
-
-        References
-        ----------
-        .. [1] http://matplotlib.org/examples/color/colormaps_reference.html
-        .. [2] EMPeror: a tool for visualizing high-throughput microbial
-           community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
-           Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
-
-        Examples
-        --------
-        .. plot::
-
-           Define a distance matrix with four samples labelled A-D:
-
-           >>> from skbio import DistanceMatrix
-           >>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
-           ...                      [0.21712454, 0., 0.45995501, 0.80332382],
-           ...                      [0.5007512, 0.45995501, 0., 0.65463348],
-           ...                      [0.91769271, 0.80332382, 0.65463348, 0.]],
-           ...                     ['A', 'B', 'C', 'D'])
-
-           Define metadata for each sample in a ``pandas.DataFrame``:
-
-           >>> import pandas as pd
-           >>> metadata = {
-           ...     'A': {'body_site': 'skin'},
-           ...     'B': {'body_site': 'gut'},
-           ...     'C': {'body_site': 'gut'},
-           ...     'D': {'body_site': 'skin'}}
-           >>> df = pd.DataFrame.from_dict(metadata, orient='index')
-
-           Run principal coordinate analysis (PCoA) on the distance matrix:
-
-           >>> from skbio.stats.ordination import pcoa
-           >>> pcoa_results = pcoa(dm)
-
-           Plot the ordination results, where each sample is colored by body
-           site (a categorical variable):
-
-           >>> fig = pcoa_results.plot(df=df, column='body_site',
-           ...                         title='Samples colored by body site',
-           ...                         cmap='Set1', s=50)
-
-        """
-        # Note: New features should not be added to this method and should
-        # instead be added to EMPeror (http://biocore.github.io/emperor/).
-        # Only bug fixes and minor updates should be made to this method.
-
-        coord_matrix = self.samples.values.T
-        self._validate_plot_axes(coord_matrix, axes)
-
-        # derived from
-        # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
-        fig = plt.figure()
-        # create the axes, leaving room for a legend as described here:
-        # http://stackoverflow.com/a/9651897/3424666
-        ax = fig.add_axes([0.1, 0.1, 0.6, 0.75], projection='3d')
-
-        xs = coord_matrix[axes[0]]
-        ys = coord_matrix[axes[1]]
-        zs = coord_matrix[axes[2]]
-
-        point_colors, category_to_color = self._get_plot_point_colors(
-            df, column, self.samples.index, cmap)
-
-        scatter_fn = functools.partial(ax.scatter, xs, ys, zs, s=s)
-        if point_colors is None:
-            plot = scatter_fn()
-        else:
-            plot = scatter_fn(c=point_colors, cmap=cmap)
-
-        if axis_labels is None:
-            axis_labels = ['%d' % axis for axis in axes]
-        elif len(axis_labels) != 3:
-            raise ValueError("axis_labels must contain exactly three elements "
-                             "(found %d elements)." % len(axis_labels))
-
-        ax.set_xlabel(axis_labels[0])
-        ax.set_ylabel(axis_labels[1])
-        ax.set_zlabel(axis_labels[2])
-        ax.set_xticklabels([])
-        ax.set_yticklabels([])
-        ax.set_zticklabels([])
-        ax.set_title(title)
-
-        # create legend/colorbar
-        if point_colors is not None:
-            if category_to_color is None:
-                fig.colorbar(plot)
-            else:
-                self._plot_categorical_legend(ax, category_to_color)
-
-        return fig
-
-    def _validate_plot_axes(self, coord_matrix, axes):
-        """Validate `axes` against coordinates matrix."""
-        num_dims = coord_matrix.shape[0]
-        if num_dims < 3:
-            raise ValueError("At least three dimensions are required to plot "
-                             "ordination results. There are only %d "
-                             "dimension(s)." % num_dims)
-        if len(axes) != 3:
-            raise ValueError("`axes` must contain exactly three elements "
-                             "(found %d elements)." % len(axes))
-        if len(set(axes)) != 3:
-            raise ValueError("The values provided for `axes` must be unique.")
-
-        for idx, axis in enumerate(axes):
-            if axis < 0 or axis >= num_dims:
-                raise ValueError("`axes[%d]` must be >= 0 and < %d." %
-                                 (idx, num_dims))
-
-    def _get_plot_point_colors(self, df, column, ids, cmap):
-        """Return a list of colors for each plot point given a metadata column.
-
-        If `column` is categorical, additionally returns a dictionary mapping
-        each category (str) to color (used for legend creation).
-
-        """
-        if ((df is None and column is not None) or (df is not None and
-                                                    column is None)):
-            raise ValueError("Both df and column must be provided, or both "
-                             "must be None.")
-        elif df is None and column is None:
-            point_colors, category_to_color = None, None
-        else:
-            if column not in df:
-                raise ValueError("Column '%s' not in data frame." % column)
-
-            col_vals = df.loc[ids, column]
-
-            if col_vals.isnull().any():
-                raise ValueError("One or more IDs in the ordination results "
-                                 "are not in the data frame, or there is "
-                                 "missing data in the data frame's '%s' "
-                                 "column." % column)
-
-            category_to_color = None
-            try:
-                point_colors = col_vals.astype(float)
-            except ValueError:
-                # we have categorical data, so choose a color for each
-                # category, where colors are evenly spaced across the
-                # colormap.
-                # derived from http://stackoverflow.com/a/14887119
-                categories = col_vals.unique()
-                cmap = plt.get_cmap(cmap)
-                category_colors = cmap(np.linspace(0, 1, len(categories)))
-
-                category_to_color = dict(zip(categories, category_colors))
-                point_colors = col_vals.apply(lambda x: category_to_color[x])
-
-            point_colors = point_colors.tolist()
-
-        return point_colors, category_to_color
-
-    def _plot_categorical_legend(self, ax, color_dict):
-        """Add legend to plot using specified mapping of category to color."""
-        # derived from http://stackoverflow.com/a/20505720
-        proxies = []
-        labels = []
-        for category in color_dict:
-            proxy = mpl.lines.Line2D([0], [0], linestyle='none',
-                                     c=color_dict[category], marker='o')
-            proxies.append(proxy)
-            labels.append(category)
-
-        # place legend outside of the axes (centered)
-        # derived from http://matplotlib.org/users/legend_guide.html
-        ax.legend(proxies, labels, numpoints=1, loc=6,
-                  bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
-
-    # Here we define the special repr methods that provide the IPython display
-    # protocol. Code derived from:
-    #     https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
-    #         Custom%20Display%20Logic.ipynb
-    # See licenses/ipython.txt for more details.
-
-    def _repr_png_(self):
-        return self._figure_data('png')
-
-    def _repr_svg_(self):
-        return self._figure_data('svg')
-
-    # We expose the above reprs as properties, so that the user can see them
-    # directly (since otherwise the client dictates which one it shows by
-    # default)
-    @property
-    @experimental(as_of="0.4.0")
-    def png(self):
-        """Display basic 3-D scatterplot in IPython Notebook as PNG."""
-        return Image(self._repr_png_(), embed=True)
-
-    @property
-    @experimental(as_of="0.4.0")
-    def svg(self):
-        """Display basic 3-D scatterplot in IPython Notebook as SVG."""
-        return SVG(self._repr_svg_())
-
-    def _figure_data(self, format):
-        fig = self.plot()
-        data = print_figure(fig, format)
-        # We MUST close the figure, otherwise IPython's display machinery
-        # will pick it up and send it as output, resulting in a double display
-        plt.close(fig)
-        return data
-
-    def _format_attribute(self, attr, attr_label, formatter):
-        if attr is None:
-            formatted_attr = 'N/A'
-        else:
-            formatted_attr = formatter(attr)
-        return '\t%s: %s' % (attr_label, formatted_attr)
+        raise NotImplementedError
 
 
-class ElasticLines(object):
+class ElasticLines:
     """Store blocks of content separated by dashed lines.
 
     Each dashed line (separator) is as long as the longest content
diff --git a/skbio/alignment/__init__.py b/skbio/alignment/__init__.py
index d5eb52c..d2073a6 100644
--- a/skbio/alignment/__init__.py
+++ b/skbio/alignment/__init__.py
@@ -200,8 +200,6 @@ ACGTGCCTA-GGTACGCAAG
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._tabular_msa import TabularMSA
diff --git a/skbio/alignment/_indexing.py b/skbio/alignment/_indexing.py
index d8bbe4c..d27ee5e 100644
--- a/skbio/alignment/_indexing.py
+++ b/skbio/alignment/_indexing.py
@@ -6,15 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import with_metaclass
 from abc import ABCMeta, abstractmethod
 
 import numpy as np
 import pandas as pd
 
 
-class _Indexing(with_metaclass(ABCMeta, object)):
+class _Indexing(metaclass=ABCMeta):
     def __init__(self, instance, axis=None):
         self._obj = instance
         self._axis = axis
@@ -84,15 +82,15 @@ class _Indexing(with_metaclass(ABCMeta, object)):
 
     @abstractmethod
     def is_scalar(self, indexable, axis):
-        pass
+        raise NotImplementedError
 
     @abstractmethod
     def _get_sequence(self, obj, indexable):
-        pass
+        raise NotImplementedError
 
     @abstractmethod
     def _slice_sequences(self, obj, indexable):
-        pass
+        raise NotImplementedError
 
     def _get_position(self, obj, indexable):
         return obj._get_position_(indexable)
diff --git a/skbio/alignment/_lib/__init__.py b/skbio/alignment/_lib/__init__.py
index f3468bd..497b73a 100644
--- a/skbio/alignment/_lib/__init__.py
+++ b/skbio/alignment/_lib/__init__.py
@@ -6,7 +6,5 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 test = TestRunner(__file__).test
diff --git a/skbio/alignment/_pairwise.py b/skbio/alignment/_pairwise.py
index 409532a..c2d7e24 100644
--- a/skbio/alignment/_pairwise.py
+++ b/skbio/alignment/_pairwise.py
@@ -6,12 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 from warnings import warn
 from itertools import product
 
 import numpy as np
-from future.builtins import range, zip
 
 from skbio.alignment import TabularMSA
 from skbio.alignment._ssw_wrapper import StripedSmithWaterman
@@ -720,16 +718,24 @@ def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
             (alignment.target_begin, alignment.target_end_optimal)
         ]
 
+    metadata1 = metadata2 = None
+    if sequence1.has_metadata():
+        metadata1 = sequence1.metadata
+    if sequence2.has_metadata():
+        metadata2 = sequence2.metadata
+
     constructor = type(sequence1)
     msa = TabularMSA([
-        constructor(alignment.aligned_query_sequence),
-        constructor(alignment.aligned_target_sequence)
+        constructor(alignment.aligned_query_sequence, metadata=metadata1,
+                    validate=False),
+        constructor(alignment.aligned_target_sequence, metadata=metadata2,
+                    validate=False)
     ])
 
     return msa, alignment.optimal_alignment_score, start_end
 
 
-@deprecated(as_of="0.4.0", until="0.5.0",
+@deprecated(as_of="0.4.0", until="0.5.1",
             reason="Will be replaced by a SubstitutionMatrix class. To track "
                    "progress, see [#161]"
                    "(https://github.com/biocore/scikit-bio/issues/161).")
@@ -900,12 +906,14 @@ def _compute_score_and_traceback_matrices(
 
     # Iterate over the characters in aln2 (which corresponds to the vertical
     # sequence in the matrix)
-    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(), 1):
+    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(
+            ignore_metadata=True), 1):
         aln2_chars = str(aln2_chars)
 
         # Iterate over the characters in aln1 (which corresponds to the
         # horizontal sequence in the matrix)
-        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(), 1):
+        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(
+                ignore_metadata=True), 1):
             aln1_chars = str(aln1_chars)
 
             # compute the score for a match/mismatch
@@ -1011,15 +1019,23 @@ def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
             raise ValueError(
                 "Invalid value in traceback matrix: %s" % current_value)
 
-    for i in range(aln1_sequence_count):
-        aligned_seq = ''.join(aligned_seqs1[i][::-1])
+    for i, (aligned_seq, original) in enumerate(zip(aligned_seqs1, aln1)):
+        aligned_seq = ''.join(aligned_seq)[::-1]
         constructor = aln1.dtype
-        aligned_seqs1[i] = constructor(aligned_seq)
-
-    for i in range(aln2_sequence_count):
-        aligned_seq = ''.join(aligned_seqs2[i][::-1])
+        metadata = None
+        if original.has_metadata():
+            metadata = original.metadata
+        aligned_seqs1[i] = constructor(aligned_seq, metadata=metadata,
+                                       validate=False)
+
+    for i, (aligned_seq, original) in enumerate(zip(aligned_seqs2, aln2)):
+        aligned_seq = ''.join(aligned_seq)[::-1]
         constructor = aln2.dtype
-        aligned_seqs2[i] = constructor(aligned_seq)
+        metadata = None
+        if original.has_metadata():
+            metadata = original.metadata
+        aligned_seqs2[i] = constructor(aligned_seq, metadata=metadata,
+                                       validate=False)
 
     return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
 
diff --git a/skbio/alignment/_repr.py b/skbio/alignment/_repr.py
index 701aca6..f2329fd 100644
--- a/skbio/alignment/_repr.py
+++ b/skbio/alignment/_repr.py
@@ -6,9 +6,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 
-from skbio.util._metadata_repr import _MetadataReprBuilder
+from skbio.metadata._repr import _MetadataReprBuilder
 
 
 class _TabularMSAReprBuilder(_MetadataReprBuilder):
diff --git a/skbio/alignment/_tabular_msa.py b/skbio/alignment/_tabular_msa.py
index 0de32b6..5555ad2 100644
--- a/skbio/alignment/_tabular_msa.py
+++ b/skbio/alignment/_tabular_msa.py
@@ -6,18 +6,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import collections
 import copy
 
-from future.builtins import range
-from future.utils import viewkeys, viewvalues
 import numpy as np
 import pandas as pd
 import scipy.stats
 
-from skbio._base import SkbioObject, MetadataMixin, PositionalMetadataMixin
+from skbio._base import SkbioObject
+from skbio.metadata._mixin import MetadataMixin, PositionalMetadataMixin
 from skbio.sequence import Sequence
 from skbio.sequence._grammared_sequence import GrammaredSequence
 from skbio.util._decorator import experimental, classonlymethod, overrides
@@ -53,11 +50,12 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
     minter : callable or metadata key, optional
         If provided, defines an index label for each sequence in `sequences`.
         Can either be a callable accepting a single argument (each sequence) or
-        a key into each sequence's ``metadata`` attribute.
+        a key into each sequence's ``metadata`` attribute. Note that `minter`
+        cannot be combined with `index`.
     index : pd.Index consumable, optional
         Index containing labels for `sequences`. Must be the same length as
         `sequences`. Must be able to be passed directly to ``pd.Index``
-        constructor.
+        constructor. Note that `index` cannot be combined with `minter`.
 
     Raises
     ------
@@ -65,6 +63,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         If `minter` and `index` are both provided.
     ValueError
         If `index` is not the same length as `sequences`.
+    TypeError
+        If `sequences` contains an object that isn't a ``GrammaredSequence``.
+    TypeError
+        If `sequences` does not contain exactly the same type of
+        ``GrammaredSequence`` objects.
+    ValueError
+        If `sequences` does not contain ``GrammaredSequence`` objects of the
+        same length.
 
     See Also
     --------
@@ -73,11 +79,12 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
     skbio.sequence.Protein
     pandas.DataFrame
     pandas.Index
+    reassign_index
 
     Notes
     -----
-    If `minter` or `index` are not provided, default pandas labels will be
-    used: integer labels ``0..(N-1)``, where ``N`` is the number of sequences.
+    If neither `minter` nor `index` are provided, default index labels will be
+    used: ``pd.RangeIndex(start=0, stop=len(sequences), step=1)``.
 
     Examples
     --------
@@ -101,10 +108,11 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
     AG-T
     -C-T
 
-    The MSA has default index labels:
+    Since `minter` or `index` wasn't provided, the MSA has default index
+    labels:
 
     >>> msa.index
-    Int64Index([0, 1, 2], dtype='int64')
+    RangeIndex(start=0, stop=3, step=1)
 
     Create an MSA with metadata, positional metadata, and non-default index
     labels:
@@ -131,6 +139,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
     """
     default_write_format = 'fasta'
+    __hash__ = None
 
     @property
     @experimental(as_of='0.4.1')
@@ -221,25 +230,27 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> from skbio import DNA, TabularMSA
         >>> seqs = [DNA('ACG', metadata={'id': 'a'}),
-        ...         DNA('AC-', metadata={'id': 'b'})]
+        ...         DNA('AC-', metadata={'id': 'b'}),
+        ...         DNA('AC-', metadata={'id': 'c'})]
         >>> msa = TabularMSA(seqs, minter='id')
 
         Retrieve index:
 
         >>> msa.index
-        Index(['a', 'b'], dtype='object')
+        Index(['a', 'b', 'c'], dtype='object')
 
         Set index:
 
-        >>> msa.index = ['seq1', 'seq2']
+        >>> msa.index = ['seq1', 'seq2', 'seq3']
         >>> msa.index
-        Index(['seq1', 'seq2'], dtype='object')
+        Index(['seq1', 'seq2', 'seq3'], dtype='object')
 
-        Delete index:
+        Deleting the index resets it to the ``TabularMSA`` constructor's
+        default:
 
         >>> del msa.index
         >>> msa.index
-        Int64Index([0, 1], dtype='int64')
+        RangeIndex(start=0, stop=3, step=1)
 
         """
         return self._seqs.index
@@ -249,13 +260,13 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         # Cast to Index to identify tuples as a MultiIndex to match
         # pandas constructor. Just setting would make an index of tuples.
         if not isinstance(index, pd.Index):
-            self._seqs.index = pd.Index(index)
-        else:
-            self._seqs.index = index
+            index = pd.Index(index)
+        self._seqs.index = index
 
     @index.deleter
     def index(self):
-        self.reassign_index()
+        # Create a memory-efficient integer index as the default MSA index.
+        self._seqs.index = pd.RangeIndex(start=0, stop=len(self), step=1)
 
     @property
     @experimental(as_of="0.4.1")
@@ -336,14 +347,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.loc['b']
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: True
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 33.33%
-        -----------------------------
+        --------------------------
         0 A-GT
 
         Similarly when we slice the second axis by a scalar we get a column of
@@ -366,14 +377,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.loc['a', 0]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 1
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 0.00%
-        -----------------------------
+        --------------------------
         0 A
 
         In other words, it exactly matches slicing the resulting sequence
@@ -381,14 +392,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.loc['a'][0]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 1
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 0.00%
-        -----------------------------
+        --------------------------
         0 A
 
         When our slice is non-scalar we get back an MSA of the same `dtype`:
@@ -476,14 +487,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.loc(axis='sequence')['a', 0]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 50.00%
-        -----------------------------
+        --------------------------
         0 ACGT
 
         This selected the first sequence because the complete label was
@@ -595,14 +606,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.iloc[1]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: True
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 33.33%
-        -----------------------------
+        --------------------------
         0 A-GT
 
         Similarly when we slice the second axis by a scalar we get a column of
@@ -625,14 +636,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.iloc[0, 0]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 1
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 0.00%
-        -----------------------------
+        --------------------------
         0 A
 
         In other words, it exactly matches slicing the resulting sequence
@@ -640,14 +651,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> msa.iloc[0][0]
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 1
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 0.00%
-        -----------------------------
+        --------------------------
         0 A
 
         When our slice is non-scalar we get back an MSA of the same `dtype`:
@@ -746,12 +757,11 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         True
 
         """
-        # Python 2 and 3 guarantee same order of iteration as long as no
+        # Python 3 guarantees same order of iteration as long as no
         # modifications are made to the dictionary between calls:
-        #     https://docs.python.org/2/library/stdtypes.html#dict.items
         #     https://docs.python.org/3/library/stdtypes.html#
         #         dictionary-view-objects
-        return cls(viewvalues(dictionary), index=viewkeys(dictionary))
+        return cls(dictionary.values(), index=dictionary.keys())
 
     @experimental(as_of='0.4.1')
     def __init__(self, sequences, metadata=None, positional_metadata=None,
@@ -765,8 +775,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             if minter is None and index is None:
                 index = sequences.index
 
+        # Give a better error message than the one raised by `extend` (it
+        # references `reset_index`, which isn't a constructor parameter).
+        if minter is not None and index is not None:
+            raise ValueError(
+                "Cannot use both `minter` and `index` at the same time.")
         self._seqs = pd.Series([])
-        self.extend(sequences, minter=minter, index=index)
+        self.extend(sequences, minter=minter, index=index,
+                    reset_index=minter is None and index is None)
 
         MetadataMixin._init_(self, metadata=metadata)
         PositionalMetadataMixin._init_(
@@ -789,6 +805,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
                 metadata = self.metadata
             else:
                 metadata = None
+
         if positional_metadata is NotImplemented:
             if self.has_positional_metadata():
                 positional_metadata = self.positional_metadata
@@ -859,11 +876,9 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         """
         # It is impossible to have 0 sequences and >0 positions.
+        # TODO: change for #1198
         return self.shape.position > 0
 
-    # Python 2 compatibility.
-    __nonzero__ = __bool__
-
     @experimental(as_of='0.4.1')
     def __contains__(self, label):
         """Determine if an index label is in this MSA.
@@ -1182,28 +1197,37 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         except TypeError:  # NaN hit the constructor, key was bad... probably
             raise KeyError("Part of `%r` was not in the index.")
 
-    def _get_position_(self, i):
+    def _get_position_(self, i, ignore_metadata=False):
+        if ignore_metadata:
+            return Sequence(''.join([str(s[i]) for s in self._seqs]))
+
         seq = Sequence.concat([s[i] for s in self._seqs], how='outer')
-        if self.has_positional_metadata():
+        # TODO: change for #1198
+        if len(self) and self.has_positional_metadata():
             seq.metadata = dict(self.positional_metadata.iloc[i])
         return seq
 
     def _slice_positions_(self, i):
         seqs = self._seqs.apply(lambda seq: seq[i])
+        # TODO: change for #1198
         pm = None
-        if self.has_positional_metadata():
+        if len(self) and self.has_positional_metadata():
             pm = self.positional_metadata.iloc[i]
         return self._constructor_(seqs, positional_metadata=pm)
     # end of helpers
 
     @experimental(as_of='0.4.1')
-    def iter_positions(self, reverse=False):
+    def iter_positions(self, reverse=False, ignore_metadata=False):
         """Iterate over positions (columns) in the MSA.
 
         Parameters
         ----------
         reverse : bool, optional
             If ``True``, iterate over positions in reverse order.
+        ignore_metadata : bool, optional
+            If ``True``, ``Sequence.metadata`` and
+            ``Sequence.positional_metadata`` will not be included. This can
+            significantly improve performance if metadata is not needed.
 
         Yields
         ------
@@ -1224,10 +1248,12 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         real biological sequence.
 
         Each ``Sequence`` object will have its corresponding MSA positional
-        metadata stored as ``metadata``.
+        metadata stored as ``metadata`` unless ``ignore_metadata`` is set to
+        ``True``.
 
         Sequences will have their positional metadata concatenated using an
-        outer join. See ``Sequence.concat(how='outer')`` for details.
+        outer join unless ``ignore_metadata`` is set to ``True``. See
+        ``Sequence.concat(how='outer')`` for details.
 
         Examples
         --------
@@ -1313,7 +1339,8 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         if reverse:
             indices = reversed(indices)
 
-        return (self._get_position_(index) for index in indices)
+        return (self._get_position_(index, ignore_metadata=ignore_metadata)
+                for index in indices)
 
     @experimental(as_of='0.4.1')
     def consensus(self):
@@ -1351,16 +1378,16 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         ...                  positional_metadata={'prob': [2, 1, 2, 3, 5]})
         >>> msa.consensus()
         DNA
-        -----------------------------
+        --------------------------
         Positional metadata:
             'prob': <dtype: int64>
         Stats:
             length: 5
             has gaps: True
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 33.33%
-        -----------------------------
+        --------------------------
         0 AT-C-
 
         Note that the last position in the MSA has more than one type of gap
@@ -1379,7 +1406,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             positional_metadata = self.positional_metadata
 
         consensus = []
-        for position in self.iter_positions():
+        for position in self.iter_positions(ignore_metadata=True):
             freqs = position.frequencies()
 
             gap_freq = 0
@@ -1395,14 +1422,14 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
                      positional_metadata=positional_metadata)
 
     def _build_inverse_shannon_uncertainty_f(self, include_gaps):
-        base = len(self.dtype.nondegenerate_chars)
+        base = len(self.dtype.definite_chars)
         if include_gaps:
             # Increment the base by one to reflect the possible inclusion of
             # the default gap character.
             base += 1
 
         def f(p):
-            freqs = list(p.kmer_frequencies(k=1).values())
+            freqs = list(p.frequencies().values())
             return 1. - scipy.stats.entropy(freqs, base=base)
         return f
 
@@ -1461,12 +1488,12 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         ``"inverse_shannon_uncertainty"`` metric.
 
         ``gap_mode = "include"`` will result in all gap characters being
-        recoded to ``Alignment.dtype.default_gap_char``. Because no
+        recoded to ``TabularMSA.dtype.default_gap_char``. Because no
         conservation metrics that we are aware of consider different gap
         characters differently (e.g., none of the metrics described in [1]_),
         they are all treated the same within this method.
 
-        The ``inverse_shannon_uncertainty`` metric is simiply one minus
+        The ``inverse_shannon_uncertainty`` metric is simply one minus
         Shannon's uncertainty metric. This method uses the inverse of Shannon's
         uncertainty so that larger values imply higher conservation. Shannon's
         uncertainty is also referred to as Shannon's entropy, but when making
@@ -1505,7 +1532,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
                         gap_mode == 'include')
 
         result = []
-        for p in self.iter_positions():
+        for p in self.iter_positions(ignore_metadata=True):
             cons = None
             # cast p to self.dtype for access to gap/degenerate related
             # functionality
@@ -1532,12 +1559,8 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
                     pos_seq = pos_seq.degap()
                 else:  # gap_mode == 'include' is the only choice left
                     # Recode all gap characters with pos_seq.default_gap_char.
-                    # This logic should be replaced with a call to
-                    # pos_seq.replace when it exists.
-                    # https://github.com/biocore/scikit-bio/issues/1222
-                    with pos_seq._byte_ownership():
-                        pos_seq._bytes[pos_seq.gaps()] = \
-                            ord(pos_seq.default_gap_char)
+                    pos_seq = pos_seq.replace(pos_seq.gaps(),
+                                              pos_seq.default_gap_char)
 
             if cons is None:
                 cons = metric_f(pos_seq)
@@ -1604,7 +1627,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         """
         if self._is_sequence_axis(axis):
-            seq_iterator = self.iter_positions()
+            seq_iterator = self.iter_positions(ignore_metadata=True)
             length = self.shape.sequence
         else:
             seq_iterator = self
@@ -1621,7 +1644,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             # guaranteed to always have two gap characters). See unit tests for
             # an example.
             freqs = seq.frequencies(chars=self.dtype.gap_chars)
-            gap_freqs.append(sum(viewvalues(freqs)))
+            gap_freqs.append(sum(freqs.values()))
 
         gap_freqs = np.asarray(gap_freqs, dtype=float if relative else int)
 
@@ -1636,7 +1659,7 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         Parameters
         ----------
-        mapping : dict-like or callable, optional
+        mapping : dict or callable, optional
             Dictionary or callable that maps existing labels to new labels. Any
             label without a mapping will remain the same.
         minter : callable or metadata key, optional
@@ -1655,9 +1678,8 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         Notes
         -----
-        If neither `mapping` nor `minter` are provided, default pandas labels
-        will be used: integer labels ``0..(N-1)``, where ``N`` is the number of
-        sequences.
+        If neither `mapping` nor `minter` are provided, index labels will be
+        reset to the ``TabularMSA`` constructor's default.
 
         Examples
         --------
@@ -1665,47 +1687,53 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         >>> from skbio import DNA, TabularMSA
         >>> seqs = [DNA('ACG', metadata={'id': 'a'}),
-        ...         DNA('AC-', metadata={'id': 'b'})]
+        ...         DNA('AC-', metadata={'id': 'b'}),
+        ...         DNA('CCG', metadata={'id': 'c'})]
         >>> msa = TabularMSA(seqs)
         >>> msa.index
-        Int64Index([0, 1], dtype='int64')
+        RangeIndex(start=0, stop=3, step=1)
 
         Assign new index to the MSA using each sequence's ID as a label:
 
         >>> msa.reassign_index(minter='id')
         >>> msa.index
-        Index(['a', 'b'], dtype='object')
+        Index(['a', 'b', 'c'], dtype='object')
 
         Assign default index:
 
         >>> msa.reassign_index()
         >>> msa.index
-        Int64Index([0, 1], dtype='int64')
+        RangeIndex(start=0, stop=3, step=1)
 
         Alternatively, a mapping of existing labels to new labels may be passed
         via `mapping`:
 
         >>> msa.reassign_index(mapping={0: 'seq1', 1: 'seq2'})
         >>> msa.index
-        Index(['seq1', 'seq2'], dtype='object')
+        Index(['seq1', 'seq2', 2], dtype='object')
 
         """
         if mapping is not None and minter is not None:
             raise ValueError(
                 "Cannot use both `mapping` and `minter` at the same time.")
+
         if mapping is not None:
-            self._seqs.rename(mapping, inplace=True)
+            if isinstance(mapping, dict):
+                self.index = [mapping[label] if label in mapping else label
+                              for label in self.index]
+            elif callable(mapping):
+                self.index = [mapping(label) for label in self.index]
+            else:
+                raise TypeError(
+                    "`mapping` must be a dict or callable, not type %r"
+                    % type(mapping).__name__)
         elif minter is not None:
-            index = [resolve_key(seq, minter) for seq in self._seqs]
-
-            # Cast to Index to identify tuples as a MultiIndex to match
-            # pandas constructor. Just setting would make an index of tuples.
-            self.index = pd.Index(index)
+            self.index = [resolve_key(seq, minter) for seq in self._seqs]
         else:
-            self._seqs.reset_index(drop=True, inplace=True)
+            del self.index
 
     @experimental(as_of='0.4.1')
-    def append(self, sequence, minter=None, index=None):
+    def append(self, sequence, minter=None, index=None, reset_index=False):
         """Append a sequence to the MSA without recomputing alignment.
 
         Parameters
@@ -1717,18 +1745,20 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             Used to create an index label for the sequence being appended. If
             callable, it generates a label directly. Otherwise it's treated as
             a key into the sequence metadata. Note that `minter` cannot be
-            combined with `index`.
+            combined with `index` nor `reset_index`.
         index : object, optional
             Index label to use for the appended sequence. Note that `index`
-            cannot be combined with `minter`.
+            cannot be combined with `minter` nor `reset_index`.
+        reset_index : bool, optional
+            If ``True``, this MSA's index is reset to the ``TabularMSA``
+            constructor's default after appending. Note that `reset_index`
+            cannot be combined with `minter` nor `index`.
 
         Raises
         ------
         ValueError
-            If both `minter` and `index` are provided.
-        ValueError
-            If neither `minter` nor `index` are provided and the MSA has a
-            non-default index.
+            If fewer or more than exactly one of `minter`, `index`, or
+            `reset_index` is provided.
         TypeError
             If the sequence object isn't a ``GrammaredSequence``.
         TypeError
@@ -1744,16 +1774,15 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         Notes
         -----
-        If neither `minter` nor `index` are provided and this MSA has default
-        index labels, the new index label will be auto-incremented.
-
         The MSA is not automatically re-aligned when a sequence is appended.
         Therefore, this operation is not necessarily meaningful on its own.
 
         Examples
         --------
+        Create an MSA with a single sequence labeled ``'seq1'``:
+
         >>> from skbio import DNA, TabularMSA
-        >>> msa = TabularMSA([DNA('ACGT')])
+        >>> msa = TabularMSA([DNA('ACGT')], index=['seq1'])
         >>> msa
         TabularMSA[DNA]
         ---------------------
@@ -1762,7 +1791,13 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             position count: 4
         ---------------------
         ACGT
-        >>> msa.append(DNA('AG-T'))
+        >>> msa.index
+        Index(['seq1'], dtype='object')
+
+        Append a new sequence to the MSA, providing its index label via
+        `index`:
+
+        >>> msa.append(DNA('AG-T'), index='seq2')
         >>> msa
         TabularMSA[DNA]
         ---------------------
@@ -1772,22 +1807,36 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         ---------------------
         ACGT
         AG-T
+        >>> msa.index
+        Index(['seq1', 'seq2'], dtype='object')
 
-        Auto-incrementing index labels:
+        Append another sequence, this time resetting the MSA's index labels to
+        the default with `reset_index`. Note that since the MSA's index is
+        reset, we do not need to provide an index label for the new sequence
+        via `index` or `minter`:
 
+        >>> msa.append(DNA('ACGA'), reset_index=True)
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 3
+            position count: 4
+        ---------------------
+        ACGT
+        AG-T
+        ACGA
         >>> msa.index
-        Int64Index([0, 1], dtype='int64')
-        >>> msa.append(DNA('ACGA'))
-        >>> msa.index
-        Int64Index([0, 1, 2], dtype='int64')
+        RangeIndex(start=0, stop=3, step=1)
 
         """
         if index is not None:
             index = [index]
-        self.extend([sequence], minter=minter, index=index)
+        self.extend([sequence], minter=minter, index=index,
+                    reset_index=reset_index)
 
     @experimental(as_of='0.4.1')
-    def extend(self, sequences, minter=None, index=None):
+    def extend(self, sequences, minter=None, index=None, reset_index=False):
         """Extend this MSA with sequences without recomputing alignment.
 
         Parameters
@@ -1799,27 +1848,29 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             Used to create index labels for the sequences being appended. If
             callable, it generates a label directly. Otherwise it's treated as
             a key into the sequence metadata. Note that `minter` cannot be
-            combined with `index`.
+            combined with `index` nor `reset_index`.
         index : pd.Index consumable, optional
             Index labels to use for the appended sequences. Must be the same
             length as `sequences`. Must be able to be passed directly to
             ``pd.Index`` constructor. Note that `index` cannot be combined
-            with `minter`.
+            with `minter` nor `reset_index`.
+        reset_index : bool, optional
+            If ``True``, this MSA's index is reset to the ``TabularMSA``
+            constructor's default after extending. Note that `reset_index`
+            cannot be combined with `minter` nor `index`.
 
         Raises
         ------
         ValueError
-            If both `minter` and `index` are both provided.
-        ValueError
-            If neither `minter` nor `index` are provided and the MSA has a
-            non-default index.
+            If fewer or more than exactly one of `minter`, `index`, or
+            `reset_index` is provided.
         ValueError
             If `index` is not the same length as `sequences`.
         TypeError
             If `sequences` contains an object that isn't a
             ``GrammaredSequence``.
         TypeError
-            If `sequence` contains a type that does not match the dtype of the
+            If `sequences` contains a type that does not match the dtype of the
             MSA.
         ValueError
             If the length of a sequence does not match the number of positions
@@ -1832,16 +1883,15 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
 
         Notes
         -----
-        If neither `minter` nor `index` are provided and this MSA has default
-        index labels, the new index labels will be auto-incremented.
-
         The MSA is not automatically re-aligned when appending sequences.
         Therefore, this operation is not necessarily meaningful on its own.
 
         Examples
         --------
+        Create an MSA with a single sequence labeled ``'seq1'``:
+
         >>> from skbio import DNA, TabularMSA
-        >>> msa = TabularMSA([DNA('ACGT')])
+        >>> msa = TabularMSA([DNA('ACGT')], index=['seq1'])
         >>> msa
         TabularMSA[DNA]
         ---------------------
@@ -1850,7 +1900,13 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
             position count: 4
         ---------------------
         ACGT
-        >>> msa.extend([DNA('AG-T'), DNA('-G-T')])
+        >>> msa.index
+        Index(['seq1'], dtype='object')
+
+        Extend the MSA with sequences, providing their index labels via
+        `index`:
+
+        >>> msa.extend([DNA('AG-T'), DNA('-G-T')], index=['seq2', 'seq3'])
         >>> msa
         TabularMSA[DNA]
         ---------------------
@@ -1861,45 +1917,91 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         ACGT
         AG-T
         -G-T
+        >>> msa.index
+        Index(['seq1', 'seq2', 'seq3'], dtype='object')
 
-        Auto-incrementing index labels:
+        Extend with more sequences, this time resetting the MSA's index labels
+        to the default with `reset_index`. Note that since the MSA's index is
+        reset, we do not need to provide index labels for the new sequences via
+        `index` or `minter`:
 
+        >>> msa.extend([DNA('ACGA'), DNA('AC-T'), DNA('----')],
+        ...            reset_index=True)
+        >>> msa
+        TabularMSA[DNA]
+        ---------------------
+        Stats:
+            sequence count: 6
+            position count: 4
+        ---------------------
+        ACGT
+        AG-T
+        ...
+        AC-T
+        ----
         >>> msa.index
-        Int64Index([0, 1, 2], dtype='int64')
-        >>> msa.extend([DNA('ACGA'), DNA('AC-T'), DNA('----')])
-        >>> msa.index
-        Int64Index([0, 1, 2, 3, 4, 5], dtype='int64')
+        RangeIndex(start=0, stop=6, step=1)
 
         """
-        if minter is not None and index is not None:
+        if sum([minter is not None,
+                index is not None,
+                bool(reset_index)]) != 1:
             raise ValueError(
-                "Cannot use both `minter` and `index` at the same time.")
+                "Must provide exactly one of the following parameters: "
+                "`minter`, `index`, `reset_index`")
 
+        # Verify `sequences` first because `minter` could interact with each
+        # sequence's `metadata`.
         sequences = list(sequences)
+        self._assert_valid_sequences(sequences)
 
-        if minter is None and index is None:
-            if self.index.equals(pd.Index(np.arange(len(self)))):
-                index = range(len(self), len(self) + len(sequences))
-            else:
+        if minter is not None:
+            # Convert to Index to identify tuples as a MultiIndex instead of an
+            # index of tuples.
+            index = pd.Index([resolve_key(seq, minter) for seq in sequences])
+        elif index is not None:
+            # Convert to Index to identify tuples as a MultiIndex instead of an
+            # index of tuples.
+            if not isinstance(index, pd.Index):
+                index = pd.Index(index)
+
+            # pandas doesn't give a user-friendly error message if we pass
+            # through.
+            if len(sequences) != len(index):
                 raise ValueError(
-                    "MSA does not have default index labels, must provide "
-                    "a `minter` or `index` for sequence(s).")
-        elif minter is not None:
-            index = [resolve_key(seq, minter) for seq in sequences]
-
-        # Cast to Index to identify tuples as a MultiIndex to match
-        # pandas constructor. Just setting would make an index of tuples.
-        if not isinstance(index, pd.Index):
-            index = pd.Index(index)
+                    "Number of sequences (%d) must match index length (%d)" %
+                    (len(sequences), len(index)))
+        else:
+            # Case for `reset_index=True`. We could simply set `index=None`
+            # since it will be reset after appending below, but we can avoid a
+            # memory spike if Series.append creates a new RangeIndex from
+            # adjacent RangeIndexes in the future (pandas 0.18.0 creates an
+            # Int64Index).
+            index = pd.RangeIndex(start=len(self),
+                                  stop=len(self) + len(sequences),
+                                  step=1)
 
-        self._assert_valid_sequences(sequences)
+        if len(self):
+            self._seqs = self._seqs.append(pd.Series(sequences, index=index))
+        else:
+            # Not using Series.append to avoid turning a RangeIndex supplied
+            # via `index` parameter into an Int64Index (this happens in pandas
+            # 0.18.0).
+            self._seqs = pd.Series(sequences, index=index)
+
+            # When extending a TabularMSA without sequences, the number of
+            # positions in the TabularMSA may change from zero to non-zero. If
+            # this happens, the TabularMSA's positional_metadata must be reset
+            # to its default "empty" representation for the new number of
+            # positions, otherwise the number of positions in the TabularMSA
+            # and positional_metadata will differ.
+            #
+            # TODO: change for #1198
+            if self.shape.position > 0:
+                del self.positional_metadata
 
-        # pandas doesn't give a user-friendly error message if we pass through.
-        if len(sequences) != len(index):
-            raise ValueError(
-                "Number of sequences (%d) must match index length (%d)" %
-                (len(sequences), len(index)))
-        self._seqs = self._seqs.append(pd.Series(sequences, index=index))
+        if reset_index:
+            self.reassign_index()
 
     def _assert_valid_sequences(self, sequences):
         if not sequences:
@@ -2117,8 +2219,8 @@ class TabularMSA(MetadataMixin, PositionalMetadataMixin, SkbioObject):
         Index(['a', 'b', 'c', 'z'], dtype='object')
         >>> joined.positional_metadata
            col1  col2 col3
-        0    42     1  NaN
-        1    43     2  NaN
+        0  42.0     1  NaN
+        1  43.0     2  NaN
         2   NaN     3    f
         3   NaN     4    o
         4   NaN     5    o
diff --git a/skbio/alignment/tests/__init__.py b/skbio/alignment/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/alignment/tests/__init__.py
+++ b/skbio/alignment/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/alignment/tests/test_pairwise.py b/skbio/alignment/tests/test_pairwise.py
index d3ceb73..8c30697 100644
--- a/skbio/alignment/tests/test_pairwise.py
+++ b/skbio/alignment/tests/test_pairwise.py
@@ -6,12 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 import warnings
 
-import six
 import numpy as np
 
 from skbio import Sequence, Protein, DNA, RNA, TabularMSA
@@ -41,7 +38,7 @@ class CustomSequence(GrammaredSequence):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set('WXYZ')
 
     @classproperty
@@ -89,7 +86,9 @@ class PairwiseAlignmentTests(TestCase):
                     'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U':  5}}
         self.assertEqual(make_identity_substitution_matrix(5, -4), expected)
 
-    def test_global_pairwise_align_custom_alphabet(self):
+    # TODO: duplicate of test_global_pairwise_align_custom_alphabet, remove
+    # when nondegenerate_chars is removed
+    def test_global_pairwise_align_custom_alphabet_nondegenerate_chars(self):
         custom_substitution_matrix = make_identity_substitution_matrix(
             1, -1, alphabet=CustomSequence.nondegenerate_chars)
 
@@ -109,7 +108,29 @@ class PairwiseAlignmentTests(TestCase):
         self.assertEqual(custom_score, 2.0)
         self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
 
-    def test_local_pairwise_align_custom_alphabet(self):
+    def test_global_pairwise_align_custom_alphabet(self):
+        custom_substitution_matrix = make_identity_substitution_matrix(
+            1, -1, alphabet=CustomSequence.definite_chars)
+
+        custom_msa, custom_score, custom_start_end = global_pairwise_align(
+            CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
+            10.0, 5.0, custom_substitution_matrix)
+
+        # Expected values computed by running an equivalent alignment using the
+        # DNA alphabet with the following mapping:
+        #
+        #     W X Y Z
+        #     | | | |
+        #     A C G T
+        #
+        self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
+                                                 CustomSequence('WXYYZZ')]))
+        self.assertEqual(custom_score, 2.0)
+        self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
+
+    # TODO: duplicate of test_local_pairwise_align_custom_alphabet, remove
+    # when nondegenerate_chars is removed.
+    def test_local_pairwise_align_custom_alphabet_nondegenerate_chars(self):
         custom_substitution_matrix = make_identity_substitution_matrix(
             5, -4, alphabet=CustomSequence.nondegenerate_chars)
 
@@ -132,20 +153,43 @@ class PairwiseAlignmentTests(TestCase):
         self.assertEqual(custom_score, 41.0)
         self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
 
+    def test_local_pairwise_align_custom_alphabet(self):
+        custom_substitution_matrix = make_identity_substitution_matrix(
+            5, -4, alphabet=CustomSequence.definite_chars)
+
+        custom_msa, custom_score, custom_start_end = local_pairwise_align(
+            CustomSequence("YWXXZZYWXXWYYZWXX"),
+            CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
+            custom_substitution_matrix)
+
+        # Expected values computed by running an equivalent alignment using the
+        # DNA alphabet with the following mapping:
+        #
+        #     W X Y Z
+        #     | | | |
+        #     A C G T
+        #
+        self.assertEqual(
+            custom_msa,
+            TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
+                        CustomSequence('WXZZZYWX^^^YZWWX')]))
+        self.assertEqual(custom_score, 41.0)
+        self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
+
     def test_global_pairwise_align_invalid_type(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "GrammaredSequence.*"
-                                   "TabularMSA.*'Sequence'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "GrammaredSequence.*"
+                                    "TabularMSA.*'Sequence'"):
             global_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
 
     def test_global_pairwise_align_dtype_mismatch(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "same dtype: 'DNA' != 'RNA'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "same dtype: 'DNA' != 'RNA'"):
             global_pairwise_align(DNA('ACGT'), TabularMSA([RNA('ACGU')]),
                                   1.0, 1.0, {})
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   "same dtype: 'DNA' != 'RNA'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "same dtype: 'DNA' != 'RNA'"):
             global_pairwise_align(TabularMSA([DNA('ACGT')]),
                                   TabularMSA([RNA('ACGU')]),
                                   1.0, 1.0, {})
@@ -176,8 +220,11 @@ class PairwiseAlignmentTests(TestCase):
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
 
-        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
-                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
+                        Protein("---PAW-HEAE", metadata={'id': "s2"})]))
+
         self.assertEqual(obs_score, 23.0)
         self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
@@ -187,8 +234,11 @@ class PairwiseAlignmentTests(TestCase):
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
 
-        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
-                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
+                        Protein("---PAW-HEAE", metadata={'id': "s2"})]))
+
         self.assertEqual(obs_score, 23.0)
         self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
@@ -200,9 +250,12 @@ class PairwiseAlignmentTests(TestCase):
             TabularMSA([Protein("PAWHEAE", metadata={'id': "s3"})]),
             gap_open_penalty=10., gap_extend_penalty=5.)
 
-        self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
-                                              Protein("HDAGAWGHDE-"),
-                                              Protein("---PAW-HEAE")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
+                        Protein("HDAGAWGHDE-", metadata={'id': "s2"}),
+                        Protein("---PAW-HEAE", metadata={'id': "s3"})]))
+
         self.assertEqual(obs_score, 21.0)
         self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
 
@@ -213,9 +266,9 @@ class PairwiseAlignmentTests(TestCase):
                           Protein("HEAGAWGHEE"), 42)
 
     def test_global_pairwise_align_protein_invalid_dtype(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "TabularMSA with Protein dtype.*dtype "
-                                   "'DNA'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "TabularMSA with Protein dtype.*dtype "
+                                    "'DNA'"):
             global_pairwise_align_protein(TabularMSA([Protein('PAW')]),
                                           TabularMSA([DNA('ACGT')]))
 
@@ -285,8 +338,10 @@ class PairwiseAlignmentTests(TestCase):
             Protein("PAWHEAE", metadata={'id': "s2"}),
             gap_open_penalty=10., gap_extend_penalty=5.)
 
-        self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE"),
-                                              Protein("AW-HE")]))
+        self.assertEqual(
+            obs_msa, TabularMSA([Protein("AWGHE", metadata={'id': "s1"}),
+                                 Protein("AW-HE", metadata={'id': "s2"})]))
+
         self.assertEqual(obs_score, 26.0)
         self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
 
@@ -337,8 +392,11 @@ class PairwiseAlignmentTests(TestCase):
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
 
-        self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
-                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+                        DNA("GAACTTTGAC---GTAAC", metadata={'id': "s2"})]))
+
         self.assertEqual(obs_score, 32.0)
         self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
 
@@ -350,9 +408,12 @@ class PairwiseAlignmentTests(TestCase):
             gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
             mismatch_score=-4)
 
-        self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
-                                              DNA("-GACCATGACCAGGTACC"),
-                                              DNA("GAACTTTGAC---GTAAC")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
+                        DNA("-GACCATGACCAGGTACC", metadata={'id': "s2"}),
+                        DNA("GAACTTTGAC---GTAAC", metadata={'id': "s3"})]))
+
         self.assertEqual(obs_score, 27.5)
         self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
 
@@ -363,9 +424,9 @@ class PairwiseAlignmentTests(TestCase):
                           DNA("ACGT"), 42)
 
     def test_global_pairwise_align_nucleotide_invalid_dtype(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "TabularMSA with DNA or RNA dtype.*dtype "
-                                   "'Protein'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "TabularMSA with DNA or RNA dtype.*dtype "
+                                    "'Protein'"):
             global_pairwise_align_nucleotide(TabularMSA([DNA('ACGT')]),
                                              TabularMSA([Protein('PAW')]))
 
@@ -397,8 +458,11 @@ class PairwiseAlignmentTests(TestCase):
             gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
             mismatch_score=-4)
 
-        self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGAC"),
-                                              DNA("ACTTTGAC")]))
+        self.assertEqual(
+            obs_msa,
+            TabularMSA([DNA("ACCTTGAC", metadata={'id': "s1"}),
+                        DNA("ACTTTGAC", metadata={'id': "s2"})]))
+
         self.assertEqual(obs_score, 31.0)
         self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
 
@@ -461,13 +525,13 @@ class PairwiseAlignmentTests(TestCase):
         self.assertEqual(start_end_no_sub, start_end_alt_sub)
 
     def test_local_pairwise_align_invalid_type(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   'GrammaredSequence.*Sequence'):
+        with self.assertRaisesRegex(TypeError,
+                                    'GrammaredSequence.*Sequence'):
             local_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
 
     def test_local_pairwise_align_type_mismatch(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "same type: 'DNA' != 'RNA'"):
+        with self.assertRaisesRegex(TypeError,
+                                    "same type: 'DNA' != 'RNA'"):
             local_pairwise_align(DNA('ACGT'), RNA('ACGU'), 1.0, 1.0, {})
 
     def test_init_matrices_sw(self):
@@ -619,10 +683,11 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([DNA("ACG-")], [DNA("ACGT")], 1, 0, 0)
+        expected = ([DNA("ACG-", metadata={'id': 'foo'})],
+                    [DNA("ACGT", metadata={'id': 'bar'})], 1, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
-                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': 'foo'})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': 'bar'})]),
                             4, 3)
         self.assertEqual(actual, expected)
 
@@ -640,10 +705,10 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
         # start at bottom-right
-        expected = ([DNA("ACG-"),
-                     DNA("ACG-")],
-                    [DNA("ACGT"),
-                     DNA("ACGT")],
+        expected = ([DNA("ACG-", metadata={'id': 's1'}),
+                     DNA("ACG-", metadata={'id': 's2'})],
+                    [DNA("ACGT", metadata={'id': 's3'}),
+                     DNA("ACGT", metadata={'id': 's4'})],
                     1, 0, 0)
         actual = _traceback(tback_m, score_m,
                             TabularMSA([DNA('ACG', metadata={'id': 's1'}),
@@ -654,11 +719,11 @@ class PairwiseAlignmentTests(TestCase):
         self.assertEqual(actual, expected)
 
         # start at highest-score
-        expected = ([DNA("ACG")],
-                    [DNA("ACG")], 6, 0, 0)
+        expected = ([DNA("ACG", metadata={'id': 'foo'})],
+                    [DNA("ACG", metadata={'id': 'bar'})], 6, 0, 0)
         actual = _traceback(tback_m, score_m,
-                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
-                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': 'foo'})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': 'bar'})]),
                             3, 3)
         self.assertEqual(actual, expected)
 
@@ -669,11 +734,11 @@ class PairwiseAlignmentTests(TestCase):
                    [2, 2, 2, 1],
                    [2, 2, 2, 2]]
         tback_m = np.array(tback_m)
-        expected = ([DNA("G")],
-                    [DNA("G")], 6, 2, 2)
+        expected = ([DNA("G", metadata={'id': 'a'})],
+                    [DNA("G", metadata={'id': 'a'})], 6, 2, 2)
         actual = _traceback(tback_m, score_m,
-                            TabularMSA([DNA('ACG', metadata={'id': ''})]),
-                            TabularMSA([DNA('ACGT', metadata={'id': ''})]),
+                            TabularMSA([DNA('ACG', metadata={'id': 'a'})]),
+                            TabularMSA([DNA('ACGT', metadata={'id': 'a'})]),
                             3, 3)
         self.assertEqual(actual, expected)
 
diff --git a/skbio/alignment/tests/test_ssw.py b/skbio/alignment/tests/test_ssw.py
index fe55113..bf27132 100644
--- a/skbio/alignment/tests/test_ssw.py
+++ b/skbio/alignment/tests/test_ssw.py
@@ -16,12 +16,8 @@
 # the resulting alignments are verified by hand. Creating tests from the base
 # C API is impractical at this time.
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 
-import six
-
 from skbio import (local_pairwise_align_ssw, Sequence, DNA, RNA, Protein,
                    TabularMSA)
 from skbio.alignment import StripedSmithWaterman, AlignmentStructure
@@ -616,15 +612,14 @@ class TestAlignStripedSmithWaterman(TestSSW):
         self._check_TabularMSA_to_AlignmentStructure(align2, align1, DNA)
 
     def test_invalid_type(self):
-        with six.assertRaisesRegex(self, TypeError, "not type 'Sequence'"):
+        with self.assertRaisesRegex(TypeError, "not type 'Sequence'"):
             local_pairwise_align_ssw(DNA('ACGT'), Sequence('ACGT'))
 
-        with six.assertRaisesRegex(self, TypeError, "not type 'str'"):
+        with self.assertRaisesRegex(TypeError, "not type 'str'"):
             local_pairwise_align_ssw('ACGU', RNA('ACGU'))
 
     def test_type_mismatch(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   "same type: 'DNA' != 'RNA'"):
+        with self.assertRaisesRegex(TypeError, "same type: 'DNA' != 'RNA'"):
             local_pairwise_align_ssw(DNA('ACGT'), RNA('ACGU'))
 
 
diff --git a/skbio/alignment/tests/test_tabular_msa.py b/skbio/alignment/tests/test_tabular_msa.py
index 7f61e76..352ce41 100644
--- a/skbio/alignment/tests/test_tabular_msa.py
+++ b/skbio/alignment/tests/test_tabular_msa.py
@@ -6,15 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import collections
 import copy
 import unittest
 import functools
 import itertools
 import types
 
-import six
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
@@ -23,9 +21,10 @@ import scipy.stats
 from skbio import Sequence, DNA, RNA, Protein, TabularMSA
 from skbio.sequence import GrammaredSequence
 from skbio.util._decorator import classproperty, overrides
-from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
-                                 PositionalMetadataMixinTests,
-                                 assert_index_equal,
+from skbio.util._testing import ReallyEqualMixin
+from skbio.metadata._testing import (MetadataMixinTests,
+                                     PositionalMetadataMixinTests)
+from skbio.util._testing import (assert_index_equal,
                                  assert_data_frame_almost_equal)
 
 
@@ -69,35 +68,34 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
     def test_from_dict_invalid_input(self):
         # Basic test to make sure error-checking in the TabularMSA constructor
         # is being invoked.
-        with six.assertRaisesRegex(
-                self, ValueError, 'must match the number of positions'):
+        with self.assertRaisesRegex(
+                ValueError, 'must match the number of positions'):
             TabularMSA.from_dict({'a': DNA('ACG'), 'b': DNA('ACGT')})
 
     def test_constructor_invalid_dtype(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   'GrammaredSequence.*Sequence'):
+        with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
             TabularMSA([Sequence('')])
 
-        with six.assertRaisesRegex(self, TypeError, 'GrammaredSequence.*int'):
+        with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*int'):
             TabularMSA([42, DNA('')])
 
     def test_constructor_not_monomorphic(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*RNA.*DNA'):
+        with self.assertRaisesRegex(TypeError,
+                                    'matching type.*RNA.*DNA'):
             TabularMSA([DNA(''), RNA('')])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*float.*Protein'):
+        with self.assertRaisesRegex(TypeError,
+                                    'matching type.*float.*Protein'):
             TabularMSA([Protein(''), Protein(''), 42.0, Protein('')])
 
     def test_constructor_unequal_length(self):
-        with six.assertRaisesRegex(
-                self, ValueError,
+        with self.assertRaisesRegex(
+                ValueError,
                 'must match the number of positions.*1 != 0'):
             TabularMSA([Protein(''), Protein('P')])
 
-        with six.assertRaisesRegex(
-                self, ValueError,
+        with self.assertRaisesRegex(
+                ValueError,
                 'must match the number of positions.*1 != 3'):
             TabularMSA([Protein('PAW'), Protein('ABC'), Protein('A')])
 
@@ -105,32 +103,49 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         with self.assertRaises(TypeError):
             TabularMSA(42)
 
-    def test_constructor_non_unique_labels(self):
-        msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
-
-        assert_index_equal(msa.index, pd.Int64Index([1, 1]))
-
     def test_constructor_minter_and_index_both_provided(self):
-        with six.assertRaisesRegex(self, ValueError, 'both.*minter.*index'):
+        with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
             TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str,
                        index=['a', 'b'])
 
+    def test_constructor_invalid_minter_callable(self):
+        with self.assertRaises(TypeError):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=float)
+
+    def test_constructor_missing_minter_metadata_key(self):
+        with self.assertRaises(KeyError):
+            TabularMSA([DNA('ACGT', metadata={'foo': 'bar'}), DNA('TGCA')],
+                       minter='foo')
+
+    def test_constructor_unhashable_minter_metadata_key(self):
+        with self.assertRaises(TypeError):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=[])
+
     def test_constructor_index_length_mismatch_iterable(self):
-        with six.assertRaisesRegex(self, ValueError,
-                                   'sequences.*2.*index length.*0'):
+        with self.assertRaisesRegex(ValueError,
+                                    'sequences.*2.*index length.*0'):
             TabularMSA([DNA('ACGT'), DNA('TGCA')], index=iter([]))
 
     def test_constructor_index_length_mismatch_index_object(self):
-        with six.assertRaisesRegex(self, ValueError,
-                                   'sequences.*2.*index length.*0'):
+        with self.assertRaisesRegex(ValueError,
+                                    'sequences.*2.*index length.*0'):
             TabularMSA([DNA('ACGT'), DNA('TGCA')], index=pd.Index([]))
 
+    def test_constructor_invalid_index_scalar(self):
+        with self.assertRaises(TypeError):
+            TabularMSA([DNA('ACGT'), DNA('TGCA')], index=42)
+
+    def test_constructor_non_unique_labels(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('ACGT')], index=[1, 1])
+
+        assert_index_equal(msa.index, pd.Int64Index([1, 1]))
+
     def test_constructor_empty_no_index(self):
         # sequence empty
         msa = TabularMSA([])
         self.assertIsNone(msa.dtype)
         self.assertEqual(msa.shape, (0, 0))
-        assert_index_equal(msa.index, pd.Index([]))
+        assert_index_equal(msa.index, pd.RangeIndex(0))
         with self.assertRaises(StopIteration):
             next(iter(msa))
 
@@ -139,7 +154,7 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa = TabularMSA(seqs)
         self.assertIs(msa.dtype, DNA)
         self.assertEqual(msa.shape, (2, 0))
-        assert_index_equal(msa.index, pd.Int64Index([0, 1]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
         self.assertEqual(list(msa), seqs)
 
     def test_constructor_empty_with_labels(self):
@@ -164,7 +179,7 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa = TabularMSA(seqs)
         self.assertIs(msa.dtype, DNA)
         self.assertEqual(msa.shape, (1, 3))
-        assert_index_equal(msa.index, pd.Index([0]))
+        assert_index_equal(msa.index, pd.RangeIndex(1))
         self.assertEqual(list(msa), seqs)
 
         # 3x1
@@ -172,7 +187,7 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa = TabularMSA(seqs)
         self.assertIs(msa.dtype, DNA)
         self.assertEqual(msa.shape, (3, 1))
-        assert_index_equal(msa.index, pd.Index([0, 1, 2]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
         self.assertEqual(list(msa), seqs)
 
     def test_constructor_non_empty_with_labels_provided(self):
@@ -213,15 +228,24 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         self.assertIsInstance(msa.index, pd.MultiIndex)
         assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
 
-    def test_copy_constructor_handles_missing_metadata_efficiently(self):
+    def test_copy_constructor_respects_default_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('----'), DNA('AAAA')])
+
+        copy = TabularMSA(msa)
+
+        self.assertEqual(msa, copy)
+        self.assertIsNot(msa, copy)
+        assert_index_equal(msa.index, pd.RangeIndex(3))
+        assert_index_equal(copy.index, pd.RangeIndex(3))
+
+    def test_copy_constructor_without_metadata(self):
         msa = TabularMSA([DNA('ACGT'), DNA('----')])
 
         copy = TabularMSA(msa)
 
-        self.assertIsNone(msa._metadata)
-        self.assertIsNone(msa._positional_metadata)
-        self.assertIsNone(copy._metadata)
-        self.assertIsNone(copy._positional_metadata)
+        self.assertEqual(msa, copy)
+        self.assertIsNot(msa, copy)
+        assert_index_equal(copy.index, pd.RangeIndex(2))
 
     def test_copy_constructor_with_metadata(self):
         msa = TabularMSA([DNA('ACGT'),
@@ -236,7 +260,8 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         self.assertIsNot(msa, copy)
         self.assertIsNot(msa.metadata, copy.metadata)
         self.assertIsNot(msa.positional_metadata, copy.positional_metadata)
-        self.assertIsNot(msa.index, copy.index)
+        # pd.Index is immutable, no copy necessary.
+        self.assertIs(msa.index, copy.index)
 
     def test_copy_constructor_state_override_with_minter(self):
         msa = TabularMSA([DNA('ACGT'),
@@ -280,6 +305,12 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
                        positional_metadata={'bar': range(4, 8)},
                        index=['a', 'b']))
 
+    def test_copy_constructor_with_minter_and_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('----')], index=['idx1', 'idx2'])
+
+        with self.assertRaisesRegex(ValueError, 'both.*minter.*index'):
+            TabularMSA(msa, index=['a', 'b'], minter=str)
+
     def test_dtype(self):
         self.assertIsNone(TabularMSA([]).dtype)
         self.assertIs(TabularMSA([Protein('')]).dtype, Protein)
@@ -304,6 +335,17 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         with self.assertRaises(AttributeError):
             del TabularMSA([]).shape
 
+    def test_index_getter_default_index(self):
+        msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
+
+        assert_index_equal(msa.index, pd.RangeIndex(3))
+
+        # immutable
+        with self.assertRaises(TypeError):
+            msa.index[1] = 2
+        # original state is maintained
+        assert_index_equal(msa.index, pd.RangeIndex(3))
+
     def test_index_getter(self):
         index = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')], minter=str).index
         self.assertIsInstance(index, pd.Index)
@@ -329,17 +371,16 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
     def test_index_setter_non_empty(self):
         msa = TabularMSA([DNA('AC'), DNA('AG'), DNA('AT')])
         msa.index = range(3)
-        assert_index_equal(msa.index, pd.Index([0, 1, 2]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
         msa.index = range(3, 6)
-        assert_index_equal(msa.index, pd.Index([3, 4, 5]))
+        assert_index_equal(msa.index, pd.RangeIndex(3, 6))
 
     def test_index_setter_length_mismatch(self):
         msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
         index = pd.Index(['ACGT', 'TGCA'])
         assert_index_equal(msa.index, index)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Length mismatch.*2.*3'):
+        with self.assertRaisesRegex(ValueError, 'Length mismatch.*2.*3'):
             msa.index = iter(['ab', 'cd', 'ef'])
 
         # original state is maintained
@@ -363,11 +404,24 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
             msa.index,
             pd.Index([('foo', 42), ('bar', 43)], tupleize_cols=True))
 
+    def test_index_setter_preserves_range_index(self):
+        msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
+
+        msa.index = pd.RangeIndex(2)
+
+        self.assertEqual(msa, TabularMSA([RNA('UUU'), RNA('AAA')]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
+
     def test_index_deleter(self):
         msa = TabularMSA([RNA('UUU'), RNA('AAA')], minter=str)
         assert_index_equal(msa.index, pd.Index(['UUU', 'AAA']))
+
+        del msa.index
+        assert_index_equal(msa.index, pd.RangeIndex(2))
+
+        # Delete again.
         del msa.index
-        assert_index_equal(msa.index, pd.Index([0, 1]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
 
     def test_bool(self):
         self.assertFalse(TabularMSA([]))
@@ -487,12 +541,21 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa2 = TabularMSA([DNA('ACGT', metadata={'id': 'a'})], minter='id')
         self.assertReallyEqual(msa1, msa2)
 
+    def test_eq_default_index_and_equivalent_provided_index(self):
+        msa1 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')])
+        msa2 = TabularMSA([DNA('ACGT'), DNA('----'), DNA('....')],
+                          index=[0, 1, 2])
+
+        self.assertReallyEqual(msa1, msa2)
+        assert_index_equal(msa1.index, pd.RangeIndex(3))
+        assert_index_equal(msa2.index, pd.Int64Index([0, 1, 2]))
+
     def test_reassign_index_empty(self):
         # sequence empty
         msa = TabularMSA([])
         msa.reassign_index()
         self.assertEqual(msa, TabularMSA([]))
-        assert_index_equal(msa.index, pd.Int64Index([]))
+        assert_index_equal(msa.index, pd.RangeIndex(0))
 
         msa.reassign_index(minter=str)
         self.assertEqual(msa, TabularMSA([], minter=str))
@@ -502,7 +565,7 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa = TabularMSA([DNA('')])
         msa.reassign_index()
         self.assertEqual(msa, TabularMSA([DNA('')]))
-        assert_index_equal(msa.index, pd.Index([0]))
+        assert_index_equal(msa.index, pd.RangeIndex(1))
 
         msa.reassign_index(minter=str)
         self.assertEqual(msa, TabularMSA([DNA('')], minter=str))
@@ -528,18 +591,27 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         assert_index_equal(msa.index, pd.Index([5, 2]))
 
         msa.reassign_index()
-        assert_index_equal(msa.index, pd.Index([0, 1]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
 
     def test_reassign_index_minter_and_mapping_both_provided(self):
         msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'both.*mapping.*minter.*'):
+        with self.assertRaisesRegex(ValueError, 'both.*mapping.*minter.*'):
             msa.reassign_index(minter=str, mapping={"ACGT": "fleventy"})
 
         # original state is maintained
         assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
 
+    def test_reassign_index_mapping_invalid_type(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')], minter=str)
+
+        with self.assertRaisesRegex(TypeError,
+                                    'mapping.*dict.*callable.*list'):
+            msa.reassign_index(mapping=['abc', 'def'])
+
+        # original state is maintained
+        assert_index_equal(msa.index, pd.Index(['ACGT', 'TGCA']))
+
     def test_reassign_index_with_mapping_dict_empty(self):
         seqs = [DNA("A"), DNA("C"), DNA("G")]
         msa = TabularMSA(seqs, index=[0.5, 1.5, 2.5])
@@ -573,6 +645,10 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
 
         self.assertEqual(msa, TabularMSA(seqs, index=['0', '1', '2']))
 
+        msa.reassign_index(mapping=lambda e: int(e) + 42)
+
+        self.assertEqual(msa, TabularMSA(seqs, index=[42, 43, 44]))
+
     def test_reassign_index_non_unique_existing_index(self):
         seqs = [DNA("A"), DNA("C"), DNA("G")]
         mapping = {0.5: "a", 1.5: "b", 2.5: "c", 3.5: "d"}
@@ -620,7 +696,6 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
             TabularMSA([DNA('AC'), DNA('.G')],
                        index=[('foo', 42), ('bar', 43)]))
 
-    @unittest.skipIf(six.PY2, "Everything is orderable in Python 2.")
     def test_sort_on_unorderable_msa_index(self):
         msa = TabularMSA([DNA('AAA'), DNA('ACG'), DNA('---')],
                          index=[42, 41, 'foo'])
@@ -698,6 +773,20 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
                 DNA('TTT', metadata={'id': 'b'}),
                 DNA('TTT', metadata={'id': 'c'})], minter=str))
 
+    def test_sort_default_index(self):
+        msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
+        msa.sort()
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')]))
+
+    def test_sort_default_index_descending(self):
+        msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')])
+        msa.sort(ascending=False)
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('CC'), DNA('GG'), DNA('TC')], index=[2, 1, 0]))
+
     def test_sort_already_sorted(self):
         msa = TabularMSA([DNA('TC'), DNA('GG'), DNA('CC')], index=[1, 2, 3])
         msa.sort()
@@ -758,6 +847,13 @@ class TestTabularMSA(unittest.TestCase, ReallyEqualMixin):
         msa = TabularMSA(seqs, minter='id')
         self.assertEqual(msa.to_dict(), {42: seqs[0], -999: seqs[1]})
 
+    def test_to_dict_default_index(self):
+        msa = TabularMSA([RNA('UUA'), RNA('-C-'), RNA('AAA')])
+
+        d = msa.to_dict()
+
+        self.assertEqual(d, {0: RNA('UUA'), 1: RNA('-C-'), 2: RNA('AAA')})
+
     def test_to_dict_duplicate_labels(self):
         msa = TabularMSA([DNA("A"), DNA("G")], index=[0, 0])
 
@@ -823,7 +919,7 @@ class TestCopy(unittest.TestCase):
         self.assertIsNot(msa[0], msa_copy[0])
         self.assertIsNot(msa[1], msa_copy[1])
 
-        msa_copy.append(DNA('AAAA'))
+        msa_copy.append(DNA('AAAA'), reset_index=True)
         self.assertEqual(
             msa,
             TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
@@ -844,7 +940,8 @@ class TestCopy(unittest.TestCase):
 
         self.assertEqual(msa, msa_copy)
         self.assertIsNot(msa, msa_copy)
-        self.assertIsNot(msa.index, msa_copy.index)
+        # pd.Index is immutable, no copy necessary.
+        self.assertIs(msa.index, msa_copy.index)
 
         msa_copy.index = [1, 2]
         assert_index_equal(msa_copy.index, pd.Index([1, 2]))
@@ -872,7 +969,7 @@ class TestDeepCopy(unittest.TestCase):
         self.assertIsNot(msa[0], msa_copy[0])
         self.assertIsNot(msa[1], msa_copy[1])
 
-        msa_copy.append(DNA('AAAA'))
+        msa_copy.append(DNA('AAAA'), reset_index=True)
         self.assertEqual(
             msa,
             TabularMSA([DNA('ACGT', metadata={'foo': [1]}), DNA('TGCA')]))
@@ -893,14 +990,15 @@ class TestDeepCopy(unittest.TestCase):
 
         self.assertEqual(msa, msa_copy)
         self.assertIsNot(msa, msa_copy)
-        self.assertIsNot(msa.index, msa_copy.index)
+        # pd.Index is immutable, no copy necessary.
+        self.assertIs(msa.index, msa_copy.index)
 
         msa_copy.index = [1, 2]
         assert_index_equal(msa_copy.index, pd.Index([1, 2]))
         assert_index_equal(msa.index, pd.Index(['foo', 'bar']))
 
 
-class SharedIndexTests(object):
+class SharedIndexTests:
     def get(self, obj, indexable):
         raise NotImplementedError()
 
@@ -1327,26 +1425,26 @@ class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
         msa = TabularMSA([Protein(""), Protein(""), Protein("")],
                          index=[s, slice(1, 2), slice(2, 3)])
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, Ellipsis, axis=0)
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, s, axis=0)
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, 0, axis=0)
 
     def test_unhashable_index_second_axis(self):
         msa = TabularMSA([Protein("AA"), Protein("CC"), Protein("AA")],
                          index=[slice(0, 1), slice(1, 2), slice(2, 3)])
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, Ellipsis, axis=1)
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, [0, 1], axis=1)
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable'):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
             self.get(msa, 0, axis=1)
 
     def test_unhashable_index_both_axes(self):
@@ -1354,14 +1452,14 @@ class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
         msa = TabularMSA([RNA("AA"), RNA("CC"), RNA("AA")],
                          index=[s, [1, 2], [2, 3]])
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+        with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
             # This implies copy cannot be derived from getitem
             self.get(msa, (Ellipsis, Ellipsis))
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+        with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
             self.get(msa, (s, 0))
 
-        with six.assertRaisesRegex(self, TypeError, 'unhashable.*list'):
+        with self.assertRaisesRegex(TypeError, 'unhashable.*list'):
             self.get(msa, ('x', 10))
 
     def test_categorical_index_scalar_label(self):
@@ -1398,8 +1496,8 @@ class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
         msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
                          index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'tuple.*independent.*MultiIndex'):
+        with self.assertRaisesRegex(TypeError,
+                                    'tuple.*independent.*MultiIndex'):
             self.get(msa, ['a', 'b'])
 
     def test_missing_first_nonscalar_fancy_index(self):
@@ -1413,14 +1511,13 @@ class TestLoc(SharedPropertyIndexTests, unittest.TestCase):
         msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')],
                          index=[('a', 0, 1), ('a', 1, 1), ('b', 0, 1)])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'tuple.*pd.MultiIndex.*label'):
+        with self.assertRaisesRegex(TypeError, 'tuple.*pd.MultiIndex.*label'):
             self.get(msa, ((('a', 0, 1), ('b', 0, 1)), Ellipsis))
 
     def test_non_multiindex_tuple(self):
         msa = TabularMSA([DNA('ACGT'), DNA('ACGT'), DNA('ACGT')])
 
-        with six.assertRaisesRegex(self, TypeError, 'tuple.*first axis'):
+        with self.assertRaisesRegex(TypeError, 'tuple.*first axis'):
             self.get(msa, ((0, 1), Ellipsis))
 
     def test_assertion_exists_for_future_failure_of_get_sequence_loc(self):
@@ -1802,83 +1899,75 @@ class TestConstructor(unittest.TestCase):
 
 
 class TestAppend(unittest.TestCase):
-    def test_to_empty_msa(self):
+    # Error cases
+    def test_invalid_minter_index_reset_index_parameter_combos(self):
         msa = TabularMSA([])
 
-        msa.append(DNA('ACGT'))
-
-        self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
-
-    def test_to_empty_with_minter(self):
-        msa = TabularMSA([], minter=str)
-
-        msa.append(DNA('ACGT'))
-
-        self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
-
-    def test_to_empty_msa_with_index(self):
-        msa = TabularMSA([])
+        param_combos = (
+            {},
+            {'minter': str, 'index': 'foo', 'reset_index': True},
+            {'minter': str, 'index': 'foo'},
+            {'minter': str, 'reset_index': True},
+            {'index': 'foo', 'reset_index': True}
+        )
 
-        msa.append(DNA('ACGT'), index='a')
+        for params in param_combos:
+            with self.assertRaisesRegex(ValueError,
+                                        "one of.*minter.*index.*reset_index"):
+                msa.append(DNA('ACGT'), **params)
 
-        self.assertEqual(
-            msa,
-            TabularMSA([DNA('ACGT')], index=['a']))
+            self.assertEqual(msa, TabularMSA([]))
 
-    def test_to_empty_msa_invalid_dtype(self):
+    def test_invalid_dtype(self):
         msa = TabularMSA([])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'GrammaredSequence.*Sequence'):
-            msa.append(Sequence(''))
+        with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
+            msa.append(Sequence(''), reset_index=True)
 
         self.assertEqual(msa, TabularMSA([]))
 
-    def test_to_empty_msa_invalid_minter(self):
-        msa = TabularMSA([])
+    def test_dtype_mismatch_rna(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with self.assertRaises(KeyError):
-            msa.append(DNA('ACGT'), minter='id')
+        with self.assertRaisesRegex(TypeError, 'matching type.*RNA.*DNA'):
+            msa.append(RNA('UUUU'), reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([]))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_to_non_empty_msa_invalid_minter(self):
-        msa = TabularMSA([DNA('ACGT')], index=['foo'])
+    def test_dtype_mismatch_float(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with self.assertRaises(KeyError):
-            msa.append(DNA('AAAA'), minter='id')
+        with self.assertRaisesRegex(TypeError, 'matching type.*float.*DNA'):
+            msa.append(42.0, reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_wrong_dtype_rna(self):
+    def test_length_mismatch(self):
         msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*RNA.*DNA'):
-            msa.append(RNA('UUUU'))
+        with self.assertRaisesRegex(
+                ValueError, 'must match the number of positions.*5 != 4'):
+            msa.append(DNA('ACGTA'), reset_index=True)
 
         self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_wrong_dtype_float(self):
-        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+    def test_invalid_minter(self):
+        msa = TabularMSA([DNA('ACGT')], index=['foo'])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*float.*DNA'):
-            msa.append(42.0)
+        with self.assertRaises(KeyError):
+            msa.append(DNA('AAAA'), minter='id')
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
 
-    def test_wrong_length(self):
-        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+    # Valid cases: `minter`
+    def test_minter_empty_msa(self):
+        msa = TabularMSA([])
 
-        with six.assertRaisesRegex(
-                self, ValueError,
-                'must match the number of positions.*5 != 4'):
-            msa.append(DNA('ACGTA'))
+        msa.append(DNA('ACGT'), minter=str)
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], minter=str))
 
-    def test_with_minter_metadata_key(self):
+    def test_minter_metadata_key(self):
         msa = TabularMSA([DNA('', metadata={'id': 'a'}),
                           DNA('', metadata={'id': 'b'})],
                          minter='id')
@@ -1892,7 +1981,7 @@ class TestAppend(unittest.TestCase):
                 DNA('', metadata={'id': 'b'}),
                 DNA('', metadata={'id': 'c'})], minter='id'))
 
-    def test_with_minter_callable(self):
+    def test_minter_callable(self):
         msa = TabularMSA([DNA('', metadata={'id': 'a'}),
                           DNA('', metadata={'id': 'b'})],
                          minter='id')
@@ -1906,69 +1995,51 @@ class TestAppend(unittest.TestCase):
                 DNA('', metadata={'id': 'b'}),
                 DNA('')], index=['a', 'b', '']))
 
-    def test_with_index(self):
-        msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
-
-        msa.append(DNA('--'), index='foo')
+    def test_multiindex_minter_empty_msa(self):
+        def multiindex_minter(seq):
+            return ('foo', 42)
 
-        self.assertEqual(
-            msa,
-            TabularMSA([DNA('AC'), DNA('GT'), DNA('--')],
-                       index=['a', 'b', 'foo']))
+        msa = TabularMSA([])
 
-    def test_no_index_no_minter(self):
-        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+        msa.append(DNA('AC'), minter=multiindex_minter)
 
-        msa.append(DNA('AAAA'))
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42)]))
 
-        self.assertEqual(
-            msa,
-            TabularMSA([DNA('ACGT'), DNA('TGCA'), DNA('AAAA')]))
+    def test_multiindex_minter_non_empty_msa(self):
+        def multiindex_minter(seq):
+            return ('baz', 44)
 
-    def test_no_index_no_minter_msa_has_non_default_labels(self):
-        msa = TabularMSA([DNA(''), DNA('')], index=['a', 'b'])
+        msa = TabularMSA([RNA('UU'), RNA('CA')],
+                         index=[('foo', 42), ('bar', 43)])
 
-        with six.assertRaisesRegex(self, ValueError, "provide.*minter.*index"):
-            msa.append(DNA(''))
+        msa.append(RNA('AC'), minter=multiindex_minter)
 
-        self.assertEqual(msa, TabularMSA([DNA(''), DNA('')], index=['a', 'b']))
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index,
+                           pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
 
-    def test_with_index_type_change(self):
-        msa = TabularMSA([DNA('A'), DNA('.')])
+    # Valid cases: `index`
+    def test_index_empty_msa(self):
+        msa = TabularMSA([])
 
-        msa.append(DNA('C'), index='foo')
+        msa.append(DNA('ACGT'), index='a')
 
         self.assertEqual(
             msa,
-            TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
-
-    def test_with_index_and_minter(self):
-        msa = TabularMSA([])
-
-        with six.assertRaisesRegex(self, ValueError, "both.*minter.*index"):
-            msa.append(DNA(''), index='', minter=str)
-
-        self.assertEqual(msa, TabularMSA([]))
-
-    def test_multiple_appends_to_empty_msa_with_default_labels(self):
-        msa = TabularMSA([])
-
-        msa.append(RNA('U--'))
-        msa.append(RNA('AA.'))
-
-        self.assertEqual(msa, TabularMSA([RNA('U--'), RNA('AA.')]))
+            TabularMSA([DNA('ACGT')], index=['a']))
 
-    def test_multiple_appends_to_non_empty_msa_with_default_labels(self):
-        msa = TabularMSA([RNA('U--'), RNA('AA.')])
+    def test_index_non_empty_msa(self):
+        msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
 
-        msa.append(RNA('ACG'))
-        msa.append(RNA('U-U'))
+        msa.append(DNA('--'), index='foo')
 
         self.assertEqual(
             msa,
-            TabularMSA([RNA('U--'), RNA('AA.'), RNA('ACG'), RNA('U-U')]))
+            TabularMSA([DNA('AC'), DNA('GT'), DNA('--')],
+                       index=['a', 'b', 'foo']))
 
-    def test_with_multiindex_index(self):
+    def test_multiindex_index_empty_msa(self):
         msa = TabularMSA([])
 
         msa.append(DNA('AA'), index=('foo', 42))
@@ -1976,176 +2047,194 @@ class TestAppend(unittest.TestCase):
         self.assertIsInstance(msa.index, pd.MultiIndex)
         assert_index_equal(msa.index, pd.Index([('foo', 42)]))
 
-    def test_with_multiindex_minter(self):
-        def multiindex_minter(seq):
-            return ('foo', 42)
-
-        msa = TabularMSA([])
+    def test_multiindex_index_non_empty_msa(self):
+        msa = TabularMSA([RNA('A'), RNA('C')],
+                         index=[('foo', 42), ('bar', 43)])
 
-        msa.append(DNA('AC'), minter=multiindex_minter)
+        msa.append(RNA('U'), index=('baz', 44))
 
         self.assertIsInstance(msa.index, pd.MultiIndex)
-        assert_index_equal(msa.index, pd.Index([('foo', 42)]))
+        assert_index_equal(msa.index,
+                           pd.Index([('foo', 42), ('bar', 43), ('baz', 44)]))
 
-
-class TestExtend(unittest.TestCase):
-    def test_empty_to_empty(self):
+    # Valid cases: `reset_index`
+    def test_reset_index_empty_msa(self):
         msa = TabularMSA([])
 
-        msa.extend([])
+        msa.append(DNA('ACGT'), reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([]))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')]))
+        assert_index_equal(msa.index, pd.RangeIndex(1))
 
-    def test_empty_to_non_empty(self):
-        msa = TabularMSA([DNA('AC')])
+    def test_reset_index_default_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('CCCC')])
 
-        msa.extend([])
+        msa.append(DNA('ACGT'), reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([DNA('AC')]))
+        self.assertEqual(msa,
+                         TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
 
-    def test_single_sequence(self):
-        msa = TabularMSA([DNA('AC')])
+    def test_reset_index_non_default_index(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('CCCC')], index=['foo', 'bar'])
 
-        msa.extend([DNA('-C')])
+        msa.append(DNA('ACGT'), reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('-C')]))
+        self.assertEqual(msa,
+                         TabularMSA([DNA('ACGT'), DNA('CCCC'), DNA('ACGT')]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
 
-    def test_multiple_sequences(self):
-        msa = TabularMSA([DNA('AC')])
+    def test_reset_index_bool_cast(self):
+        msa = TabularMSA([RNA('AC'), RNA('UU')], index=[42, 43])
 
-        msa.extend([DNA('-C'), DNA('AG')])
+        msa.append(RNA('..'), reset_index='abc')
 
-        self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('-C'), DNA('AG')]))
+        self.assertEqual(msa, TabularMSA([RNA('AC'), RNA('UU'), RNA('..')]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
 
-    def test_from_iterable(self):
-        msa = TabularMSA([])
+    # Valid cases (misc)
+    def test_index_type_change(self):
+        msa = TabularMSA([DNA('A'), DNA('.')])
 
-        msa.extend(iter([DNA('ACGT'), DNA('TGCA')]))
+        msa.append(DNA('C'), index='foo')
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
 
-    def test_from_tabular_msa_default_labels(self):
-        msa = TabularMSA([DNA('AC'), DNA('TG')])
+    def test_duplicate_index(self):
+        msa = TabularMSA([DNA('A'), DNA('.')], index=['foo', 'bar'])
 
-        msa.extend(TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')],
-                              index=['a', 'b', 'c']))
+        msa.append(DNA('C'), index='foo')
 
         self.assertEqual(
             msa,
-            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
-                        DNA('AA')]))
+            TabularMSA([DNA('A'), DNA('.'), DNA('C')],
+                       index=['foo', 'bar', 'foo']))
 
-    def test_from_tabular_msa_non_default_labels(self):
-        msa = TabularMSA([DNA('AC'), DNA('TG')], index=['a', 'b'])
+    def test_empty_msa_with_positional_metadata_no_new_positions(self):
+        msa = TabularMSA([], positional_metadata={'foo': []})
 
-        with six.assertRaisesRegex(self, ValueError, 'provide.*minter.*index'):
-            msa.extend(TabularMSA([DNA('GG'), DNA('CC')]))
+        msa.append(DNA(''), reset_index=True)
 
         self.assertEqual(
             msa,
-            TabularMSA([DNA('AC'), DNA('TG')], index=['a', 'b']))
+            TabularMSA([DNA('')], positional_metadata={'foo': []}))
 
-    def test_from_tabular_msa_with_index(self):
-        msa1 = TabularMSA([DNA('AC'), DNA('TG')])
-        msa2 = TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')])
+    def test_empty_msa_with_positional_metadata_add_new_positions(self):
+        # bug in 0.4.2
+        msa = TabularMSA([], positional_metadata={'foo': []})
 
-        msa1.extend(msa2, index=msa2.index)
+        msa.append(DNA('AA'), reset_index=True)
 
         self.assertEqual(
-            msa1,
-            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
-                        DNA('AA')], index=[0, 1, 0, 1, 2]))
+            msa,
+            TabularMSA([DNA('AA')]))
 
-    def test_minter_and_index(self):
-        with six.assertRaisesRegex(self, ValueError, 'both.*minter.*index'):
-            TabularMSA([]).extend([DNA('ACGT')], minter=str, index=['foo'])
 
-    def test_no_minter_no_index_to_empty(self):
+class TestExtend(unittest.TestCase):
+    # Error cases
+    #
+    # Note: these tests check that the MSA isn't mutated when an error is
+    # raised. Where applicable, the "invalid" sequence is preceded by valid
+    # sequence(s) to test one possible (buggy) implementation of `extend`:
+    # looping over `sequences` and calling `append`. These tests ensure that
+    # valid sequences aren't appended to the MSA before the error is raised.
+    def test_invalid_minter_index_reset_index_parameter_combos(self):
         msa = TabularMSA([])
 
-        msa.extend([DNA('ACGT'), DNA('TGCA')])
-
-        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
-
-    def test_no_minter_no_index_to_non_empty(self):
-        msa = TabularMSA([DNA('ACGT')])
+        param_combos = (
+            {},
+            {'minter': str, 'index': 'foo', 'reset_index': True},
+            {'minter': str, 'index': 'foo'},
+            {'minter': str, 'reset_index': True},
+            {'index': 'foo', 'reset_index': True}
+        )
 
-        msa.extend([DNA('TGCA'), DNA('--..')])
+        for params in param_combos:
+            with self.assertRaisesRegex(ValueError,
+                                        "one of.*minter.*index.*reset_index"):
+                msa.extend([DNA('ACGT')], **params)
 
-        self.assertEqual(msa,
-                         TabularMSA([DNA('ACGT'), DNA('TGCA'), DNA('--..')]))
+            self.assertEqual(msa, TabularMSA([]))
 
-    def test_no_minter_no_index_msa_has_non_default_labels(self):
-        msa = TabularMSA([DNA('ACGT')], index=[1])
+    def test_from_tabular_msa_index_param_still_required(self):
+        msa = TabularMSA([DNA('AC'), DNA('TG')])
 
-        with six.assertRaisesRegex(self, ValueError, 'provide.*minter.*index'):
-            msa.extend([DNA('TGCA')])
+        with self.assertRaisesRegex(ValueError,
+                                    "one of.*minter.*index.*reset_index"):
+            msa.extend(TabularMSA([DNA('GG'), DNA('CC')]))
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=[1]))
+        self.assertEqual(msa, TabularMSA([DNA('AC'), DNA('TG')]))
 
     def test_invalid_dtype(self):
         msa = TabularMSA([])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'GrammaredSequence.*Sequence'):
-            msa.extend([Sequence('')])
+        with self.assertRaisesRegex(TypeError, 'GrammaredSequence.*Sequence'):
+            msa.extend([Sequence('')], reset_index=True)
 
         self.assertEqual(msa, TabularMSA([]))
 
-    def test_invalid_minter(self):
-        # This test (and the following error case tests) check that the MSA
-        # isn't mutated when an error is raised. The "invalid" sequence is
-        # preceded by valid sequence(s) to test one possible (buggy)
-        # implementation of extend(): looping over sequences and calling
-        # append(). These tests ensure that "valid" sequences aren't appended
-        # to the MSA before the error is raised.
-        msa = TabularMSA([DNA('ACGT')], index=['foo'])
+    def test_dtype_mismatch_rna(self):
+        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with self.assertRaises(KeyError):
-            msa.extend([DNA('AAAA', metadata={'id': 'foo'}),
-                        DNA('----')], minter='id')
+        with self.assertRaisesRegex(TypeError, 'matching type.*RNA.*DNA'):
+            msa.extend([DNA('----'), RNA('UUUU')], reset_index=True)
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_mismatched_dtype(self):
+    def test_dtype_mismatch_float(self):
         msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*RNA.*DNA'):
-            msa.extend([DNA('----'), RNA('UUUU')])
+        with self.assertRaisesRegex(TypeError, 'matching type.*float.*DNA'):
+            msa.extend([DNA('GGGG'), 42.0], reset_index=True)
 
         self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_wrong_dtype_float(self):
+    def test_length_mismatch(self):
         msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'matching type.*float.*DNA'):
-            msa.extend([DNA('GGGG'), 42.0])
+        with self.assertRaisesRegex(
+                ValueError, 'must match the number of positions.*5 != 4'):
+            msa.extend([DNA('TTTT'), DNA('ACGTA')], reset_index=True)
 
         self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
 
-    def test_wrong_length(self):
-        msa = TabularMSA([DNA('ACGT'), DNA('TGCA')])
+    def test_invalid_minter(self):
+        msa = TabularMSA([DNA('ACGT')], index=['foo'])
 
-        with six.assertRaisesRegex(
-                self, ValueError,
-                'must match the number of positions.*5 != 4'):
-            msa.extend([DNA('TTTT'), DNA('ACGTA')])
+        with self.assertRaises(KeyError):
+            msa.extend([DNA('AAAA', metadata={'id': 'foo'}),
+                        DNA('----')], minter='id')
 
-        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
+
+    def test_invalid_index(self):
+        msa = TabularMSA([DNA('ACGT')], index=['foo'])
+
+        with self.assertRaises(TypeError):
+            msa.extend([DNA('----')], index=42)
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT')], index=['foo']))
 
     def test_sequences_index_length_mismatch(self):
         msa = TabularMSA([])
 
-        with six.assertRaisesRegex(
-                self, ValueError,
-                'sequences.*2.*index length.*3'):
+        with self.assertRaisesRegex(ValueError,
+                                    'sequences.*2.*index length.*3'):
             msa.extend([DNA('TTTT'), DNA('ACGT')], index=['a', 'b', 'c'])
 
         self.assertEqual(msa, TabularMSA([]))
 
-    def test_with_minter_metadata_key(self):
+    # Valid cases: `minter`
+    def test_minter_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.extend([RNA('UU'), RNA('--')], minter=str)
+
+        self.assertEqual(msa, TabularMSA([RNA('UU'), RNA('--')], minter=str))
+
+    def test_minter_metadata_key(self):
         msa = TabularMSA([DNA('', metadata={'id': 'a'}),
                           DNA('', metadata={'id': 'b'})],
                          minter='id')
@@ -2161,7 +2250,7 @@ class TestExtend(unittest.TestCase):
                 DNA('', metadata={'id': 'c'}),
                 DNA('', metadata={'id': 'd'})], minter='id'))
 
-    def test_with_minter_callable(self):
+    def test_minter_callable(self):
         msa = TabularMSA([DNA('A', metadata={'id': 'a'}),
                           DNA('C', metadata={'id': 'b'})],
                          minter='id')
@@ -2176,7 +2265,47 @@ class TestExtend(unittest.TestCase):
                 DNA('G'),
                 DNA('T')], index=['a', 'b', 'G', 'T']))
 
-    def test_with_index(self):
+    def test_multiindex_minter_empty_msa(self):
+        def multiindex_minter(seq):
+            if str(seq) == 'AC':
+                return ('foo', 42)
+            else:
+                return ('bar', 43)
+
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AC'), DNA('GG')], minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_multiindex_minter_non_empty_msa(self):
+        def multiindex_minter(seq):
+            if str(seq) == 'C':
+                return ('baz', 44)
+            else:
+                return ('baz', 45)
+
+        msa = TabularMSA([DNA('A'), DNA('G')],
+                         index=[('foo', 42), ('bar', 43)])
+
+        msa.extend([DNA('C'), DNA('T')], minter=multiindex_minter)
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(
+            msa.index,
+            pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
+
+    # Valid cases: `index`
+    def test_index_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.extend([RNA('UAC'), RNA('AAU')], index=['foo', 'bar'])
+
+        self.assertEqual(msa, TabularMSA([RNA('UAC'), RNA('AAU')],
+                                         index=['foo', 'bar']))
+
+    def test_index_non_empty_msa(self):
         msa = TabularMSA([DNA('AC'), DNA('GT')], index=['a', 'b'])
 
         msa.extend([DNA('--'), DNA('..')], index=['foo', 'bar'])
@@ -2186,7 +2315,108 @@ class TestExtend(unittest.TestCase):
             TabularMSA([DNA('AC'), DNA('GT'), DNA('--'), DNA('..')],
                        index=['a', 'b', 'foo', 'bar']))
 
-    def test_with_index_type_change(self):
+    def test_multiindex_index_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AA'), DNA('GG')], index=[('foo', 42), ('bar', 43)])
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+
+    def test_multiindex_index_non_empty_msa(self):
+        msa = TabularMSA([DNA('.'), DNA('-')],
+                         index=[('foo', 42), ('bar', 43)])
+
+        msa.extend([DNA('A'), DNA('G')], index=[('baz', 44), ('baz', 45)])
+
+        self.assertIsInstance(msa.index, pd.MultiIndex)
+        assert_index_equal(
+            msa.index,
+            pd.Index([('foo', 42), ('bar', 43), ('baz', 44), ('baz', 45)]))
+
+    def test_index_object_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
+
+        self.assertEqual(msa, TabularMSA([DNA('AA'), DNA('GG')]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
+
+    def test_index_object_non_empty_msa(self):
+        msa = TabularMSA([DNA('CT'), DNA('GG')])
+
+        msa.extend([DNA('AA'), DNA('GG')], index=pd.RangeIndex(2))
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('CT'), DNA('GG'), DNA('AA'), DNA('GG')],
+                       index=[0, 1, 0, 1]))
+
+    # Valid cases: `reset_index`
+    def test_reset_index_empty_msa(self):
+        msa = TabularMSA([])
+
+        msa.extend([DNA('ACGT'), DNA('----')], reset_index=True)
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('----')]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
+
+    def test_reset_index_empty_msa_empty_iterable(self):
+        msa = TabularMSA([])
+
+        msa.extend([], reset_index=True)
+
+        self.assertEqual(msa, TabularMSA([]))
+        assert_index_equal(msa.index, pd.RangeIndex(0))
+
+    def test_reset_index_non_empty_msa_empty_iterable(self):
+        msa = TabularMSA([RNA('UU'), RNA('CC')], index=['a', 'b'])
+
+        msa.extend([], reset_index=True)
+
+        self.assertEqual(msa, TabularMSA([RNA('UU'), RNA('CC')]))
+        assert_index_equal(msa.index, pd.RangeIndex(2))
+
+    def test_reset_index_default_index(self):
+        msa = TabularMSA([DNA('A'), DNA('G')])
+
+        msa.extend([DNA('.'), DNA('-')], reset_index=True)
+
+        self.assertEqual(msa,
+                         TabularMSA([DNA('A'), DNA('G'), DNA('.'), DNA('-')]))
+        assert_index_equal(msa.index, pd.RangeIndex(4))
+
+    def test_reset_index_non_default_index(self):
+        msa = TabularMSA([DNA('A'), DNA('G')], index=['a', 'b'])
+
+        msa.extend([DNA('.'), DNA('-')], reset_index=True)
+
+        self.assertEqual(msa,
+                         TabularMSA([DNA('A'), DNA('G'), DNA('.'), DNA('-')]))
+        assert_index_equal(msa.index, pd.RangeIndex(4))
+
+    def test_reset_index_from_tabular_msa(self):
+        msa = TabularMSA([DNA('AC'), DNA('TG')], index=[42, 43])
+
+        msa.extend(TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')],
+                              index=['a', 'b', 'c']), reset_index=True)
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
+                        DNA('AA')]))
+        assert_index_equal(msa.index, pd.RangeIndex(5))
+
+    def test_reset_index_bool_cast(self):
+        msa = TabularMSA([RNA('AC'), RNA('UU')], index=[42, 43])
+
+        msa.extend([RNA('..')], reset_index='abc')
+
+        self.assertEqual(msa, TabularMSA([RNA('AC'), RNA('UU'), RNA('..')]))
+        assert_index_equal(msa.index, pd.RangeIndex(3))
+
+    # Valid cases (misc)
+    def test_index_type_change(self):
         msa = TabularMSA([DNA('A'), DNA('.')])
 
         msa.extend([DNA('C')], index=['foo'])
@@ -2195,93 +2425,117 @@ class TestExtend(unittest.TestCase):
             msa,
             TabularMSA([DNA('A'), DNA('.'), DNA('C')], index=[0, 1, 'foo']))
 
-    def test_multiple_extends_to_empty_msa_with_default_labels(self):
-        msa = TabularMSA([])
+    def test_duplicate_index(self):
+        msa = TabularMSA([DNA('A'), DNA('.')], index=['foo', 'bar'])
 
-        msa.extend([RNA('U-'), RNA('GG')])
-        msa.extend([RNA('AA')])
+        msa.extend([DNA('C'), DNA('.')], index=['foo', 'baz'])
 
-        self.assertEqual(msa, TabularMSA([RNA('U-'), RNA('GG'), RNA('AA')]))
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('A'), DNA('.'), DNA('C'), DNA('.')],
+                       index=['foo', 'bar', 'foo', 'baz']))
 
-    def test_multiple_extends_to_non_empty_msa_with_default_labels(self):
-        msa = TabularMSA([RNA('U--'), RNA('AA.')])
+    def test_empty_msa_with_positional_metadata_no_new_positions(self):
+        msa = TabularMSA([], positional_metadata={'foo': []})
 
-        msa.extend([RNA('ACG'), RNA('GCA')])
-        msa.extend([RNA('U-U')])
+        msa.extend([DNA(''), DNA('')], reset_index=True)
 
         self.assertEqual(
             msa,
-            TabularMSA([RNA('U--'),
-                        RNA('AA.'),
-                        RNA('ACG'),
-                        RNA('GCA'),
-                        RNA('U-U')]))
+            TabularMSA([DNA(''), DNA('')], positional_metadata={'foo': []}))
+
+    def test_empty_msa_with_positional_metadata_add_new_positions(self):
+        # bug in 0.4.2
+        msa = TabularMSA([], positional_metadata={'foo': []})
 
-    def test_with_multiindex_index(self):
+        msa.extend([DNA('AA'), DNA('GG')], reset_index=True)
+
+        self.assertEqual(
+            msa,
+            TabularMSA([DNA('AA'),
+                        DNA('GG')]))
+
+    def test_empty_msa_empty_iterable(self):
         msa = TabularMSA([])
 
-        msa.extend([DNA('AA'), DNA('GG')], index=[('foo', 42), ('bar', 43)])
+        msa.extend([], minter=str)
 
-        self.assertIsInstance(msa.index, pd.MultiIndex)
-        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+        self.assertEqual(msa, TabularMSA([]))
 
-    def test_with_multiindex_minter(self):
-        def multiindex_minter(seq):
-            if str(seq) == 'AC':
-                return ('foo', 42)
-            else:
-                return ('bar', 43)
+    def test_non_empty_msa_empty_iterable(self):
+        msa = TabularMSA([DNA('AC')], index=['foo'])
 
-        msa = TabularMSA([])
+        msa.extend([], index=[])
 
-        msa.extend([DNA('AC'), DNA('GG')], minter=multiindex_minter)
+        self.assertEqual(msa, TabularMSA([DNA('AC')], index=['foo']))
 
-        self.assertIsInstance(msa.index, pd.MultiIndex)
-        assert_index_equal(msa.index, pd.Index([('foo', 42), ('bar', 43)]))
+    def test_single_sequence(self):
+        msa = TabularMSA([DNA('AC')])
+
+        msa.extend([DNA('-C')], minter=str)
+
+        self.assertEqual(msa,
+                         TabularMSA([DNA('AC'), DNA('-C')], index=[0, '-C']))
+
+    def test_multiple_sequences(self):
+        msa = TabularMSA([DNA('AC')])
+
+        msa.extend([DNA('-C'), DNA('AG')], minter=str)
 
-    def test_with_index_object(self):
+        self.assertEqual(msa,
+                         TabularMSA([DNA('AC'), DNA('-C'), DNA('AG')],
+                                    index=[0, '-C', 'AG']))
+
+    def test_from_iterable(self):
         msa = TabularMSA([])
 
-        msa.extend([DNA('AA'), DNA('GG')],
-                   index=pd.Index(['foo', 'bar']))
+        msa.extend(iter([DNA('ACGT'), DNA('TGCA')]), reset_index=True)
+
+        self.assertEqual(msa, TabularMSA([DNA('ACGT'), DNA('TGCA')]))
+
+    def test_from_tabular_msa_with_index(self):
+        msa1 = TabularMSA([DNA('AC'), DNA('TG')])
+        msa2 = TabularMSA([DNA('GG'), DNA('CC'), DNA('AA')])
+
+        msa1.extend(msa2, index=msa2.index)
 
         self.assertEqual(
-            msa,
-            TabularMSA([DNA('AA'),
-                        DNA('GG')], index=['foo', 'bar']))
+            msa1,
+            TabularMSA([DNA('AC'), DNA('TG'), DNA('GG'), DNA('CC'),
+                        DNA('AA')], index=[0, 1, 0, 1, 2]))
 
 
 class TestJoin(unittest.TestCase):
     def test_invalid_how(self):
-        with six.assertRaisesRegex(self, ValueError, '`how`'):
+        with self.assertRaisesRegex(ValueError, '`how`'):
             TabularMSA([]).join(TabularMSA([]), how='really')
 
     def test_invalid_other_type(self):
-        with six.assertRaisesRegex(self, TypeError, 'TabularMSA.*DNA'):
+        with self.assertRaisesRegex(TypeError, 'TabularMSA.*DNA'):
             TabularMSA([]).join(DNA('ACGT'))
 
     def test_dtype_mismatch(self):
-        with six.assertRaisesRegex(self, TypeError, 'dtype.*RNA.*DNA'):
+        with self.assertRaisesRegex(TypeError, 'dtype.*RNA.*DNA'):
             TabularMSA([DNA('AC')]).join(TabularMSA([RNA('UG')]))
 
-        with six.assertRaisesRegex(self, TypeError, 'dtype.*None.*DNA'):
+        with self.assertRaisesRegex(TypeError, 'dtype.*None.*DNA'):
             TabularMSA([DNA('AC')]).join(TabularMSA([]))
 
-        with six.assertRaisesRegex(self, TypeError, 'dtype.*DNA.*None'):
+        with self.assertRaisesRegex(TypeError, 'dtype.*DNA.*None'):
             TabularMSA([]).join(TabularMSA([DNA('AC')]))
 
     def test_duplicate_index_labels(self):
-        with six.assertRaisesRegex(self, ValueError,
-                                   "This MSA's index labels.*unique"):
+        with self.assertRaisesRegex(ValueError,
+                                    "This MSA's index labels.*unique"):
             TabularMSA([DNA('AC'), DNA('--')], index=[0, 0]).join(
                 TabularMSA([DNA('GT'), DNA('..')]))
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   "`other`'s index labels.*unique"):
+        with self.assertRaisesRegex(ValueError,
+                                    "`other`'s index labels.*unique"):
             TabularMSA([DNA('AC'), DNA('--')]).join(
                 TabularMSA([DNA('GT'), DNA('..')], index=[0, 0]))
 
-    def test_handles_missing_metadata_efficiently(self):
+    def test_no_metadata(self):
         msa1 = TabularMSA([DNA('AC'),
                            DNA('G.')])
         msa2 = TabularMSA([DNA('-C'),
@@ -2293,12 +2547,6 @@ class TestJoin(unittest.TestCase):
             joined,
             TabularMSA([DNA('AC-C'),
                         DNA('G..G')]))
-        self.assertIsNone(msa1._metadata)
-        self.assertIsNone(msa1._positional_metadata)
-        self.assertIsNone(msa2._metadata)
-        self.assertIsNone(msa2._positional_metadata)
-        self.assertIsNone(joined._metadata)
-        self.assertIsNone(joined._positional_metadata)
 
     def test_ignores_metadata(self):
         msa1 = TabularMSA([DNA('AC', metadata={'id': 'a'}),
@@ -2413,8 +2661,7 @@ class TestJoin(unittest.TestCase):
                            DNA('CA'),
                            DNA('--')])
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Index labels must all match'):
+        with self.assertRaisesRegex(ValueError, 'Index labels must all match'):
             msa1.join(msa2)
 
     def test_how_strict_failure_positional_metadata_mismatch(self):
@@ -2426,8 +2673,8 @@ class TestJoin(unittest.TestCase):
                            DNA('.G')],
                           positional_metadata={'foo': [3, 4]})
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Positional metadata columns.*match'):
+        with self.assertRaisesRegex(ValueError,
+                                    'Positional metadata columns.*match'):
             msa1.join(msa2)
 
     def test_how_inner(self):
@@ -2586,6 +2833,13 @@ class TestIterPositions(unittest.TestCase):
 
         self.assertEqual(obs, [])
 
+    def test_no_sequences_ignore_metadata(self):
+        msa = TabularMSA([])
+
+        obs = list(msa.iter_positions(ignore_metadata=True))
+
+        self.assertEqual(obs, [])
+
     def test_no_sequences_reverse(self):
         msa = TabularMSA([])
 
@@ -2593,6 +2847,13 @@ class TestIterPositions(unittest.TestCase):
 
         self.assertEqual(obs, [])
 
+    def test_no_sequences_reverse_ignore_metadata(self):
+        msa = TabularMSA([])
+
+        obs = list(msa.iter_positions(reverse=True, ignore_metadata=True))
+
+        self.assertEqual(obs, [])
+
     def test_no_positions(self):
         msa = TabularMSA([DNA(''),
                           DNA('')])
@@ -2601,6 +2862,14 @@ class TestIterPositions(unittest.TestCase):
 
         self.assertEqual(obs, [])
 
+    def test_no_positions_ignore_metadata(self):
+        msa = TabularMSA([DNA(''),
+                          DNA('')])
+
+        obs = list(msa.iter_positions(ignore_metadata=True))
+
+        self.assertEqual(obs, [])
+
     def test_no_positions_reverse(self):
         msa = TabularMSA([DNA(''),
                           DNA('')])
@@ -2609,6 +2878,14 @@ class TestIterPositions(unittest.TestCase):
 
         self.assertEqual(obs, [])
 
+    def test_no_positions_reverse_ignore_metadata(self):
+        msa = TabularMSA([DNA(''),
+                          DNA('')])
+
+        obs = list(msa.iter_positions(reverse=True, ignore_metadata=True))
+
+        self.assertEqual(obs, [])
+
     def test_single_position(self):
         msa = TabularMSA([DNA('A')])
 
@@ -2687,15 +2964,19 @@ class TestIterPositions(unittest.TestCase):
                       positional_metadata={'foo': [42, np.nan, -1],
                                            'bar': [np.nan, np.nan, 'baz']})])
 
-    def test_handles_missing_positional_metadata_efficiently(self):
-        msa = TabularMSA([DNA('AC'),
-                          DNA('A-')])
-
-        self.assertIsNone(msa._positional_metadata)
+    def test_with_positional_metadata_ignore_metadata(self):
+        # MSA *and* sequence positional metadata.
+        msa_positional_metadata = {'pm1': [0.5, 1.5], 'foo': [9, 99]}
+        seqs = [
+            DNA('AC', positional_metadata={'foo': [42, 43]}),
+            DNA('A-'),
+            DNA('--', positional_metadata={'foo': [-1, -2],
+                                           'bar': ['baz', 'bazz']})]
+        msa = TabularMSA(seqs, positional_metadata=msa_positional_metadata)
 
-        list(msa.iter_positions())
+        obs = list(msa.iter_positions(ignore_metadata=True))
 
-        self.assertIsNone(msa._positional_metadata)
+        self.assertEqual(obs, [Sequence('AA-'), Sequence('C--')])
 
 
 class TestConsensus(unittest.TestCase):
@@ -2789,17 +3070,6 @@ class TestConsensus(unittest.TestCase):
             DNA('A-T', positional_metadata={'foo': [42, 43, 42],
                                             'bar': ['a', 'b', 'c']}))
 
-    def test_handles_missing_positional_metadata_efficiently(self):
-        msa = TabularMSA([DNA('AC'),
-                          DNA('AC')])
-
-        self.assertIsNone(msa._positional_metadata)
-
-        cons = msa.consensus()
-
-        self.assertIsNone(msa._positional_metadata)
-        self.assertIsNone(cons._positional_metadata)
-
     def test_mixed_gap_characters_as_majority(self):
         seqs = [
             DNA('A'),
@@ -3075,31 +3345,31 @@ class TestConservation(unittest.TestCase):
     def test_bad_metric(self):
         msa = TabularMSA([DNA('AA'),
                           DNA('A-')])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(metric='xyz')
 
         msa = TabularMSA([])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(metric='xyz')
 
     def test_bad_gap_mode(self):
         msa = TabularMSA([DNA('AA'),
                           DNA('A-')])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(gap_mode='xyz')
 
         msa = TabularMSA([])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(gap_mode='xyz')
 
     def test_bad_degenerate_mode(self):
         msa = TabularMSA([DNA('AA'),
                           DNA('A-')])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(degenerate_mode='xyz')
 
         msa = TabularMSA([])
-        with six.assertRaisesRegex(self, ValueError, 'xyz'):
+        with self.assertRaisesRegex(ValueError, 'xyz'):
             msa.conservation(degenerate_mode='xyz')
 
 
@@ -3113,11 +3383,11 @@ class TestGapFrequencies(unittest.TestCase):
         npt.assert_array_equal(np.array([1, 0, 2]), freqs)
 
     def test_invalid_axis_str(self):
-        with six.assertRaisesRegex(self, ValueError, "axis.*'foo'"):
+        with self.assertRaisesRegex(ValueError, "axis.*'foo'"):
             TabularMSA([]).gap_frequencies(axis='foo')
 
     def test_invalid_axis_int(self):
-        with six.assertRaisesRegex(self, ValueError, "axis.*2"):
+        with self.assertRaisesRegex(ValueError, "axis.*2"):
             TabularMSA([]).gap_frequencies(axis=2)
 
     def test_position_axis_str_and_int_equivalent(self):
@@ -3304,7 +3574,7 @@ class TestGapFrequencies(unittest.TestCase):
 
             @classproperty
             @overrides(GrammaredSequence)
-            def nondegenerate_chars(cls):
+            def definite_chars(cls):
                 return set('')
 
             @classproperty
@@ -3332,7 +3602,7 @@ class TestGapFrequencies(unittest.TestCase):
 
             @classproperty
             @overrides(GrammaredSequence)
-            def nondegenerate_chars(cls):
+            def definite_chars(cls):
                 return set('ABC-.')
 
             @classproperty
@@ -3371,25 +3641,17 @@ class TestGetPosition(unittest.TestCase):
         self.assertEqual(position,
                          Sequence('C-', metadata={'foo': 43, 'bar': 'def'}))
 
-    def test_handles_positional_metadata_efficiently(self):
-        msa = TabularMSA([DNA('AA'),
-                          DNA('--')])
-
-        msa._get_position_(1)
-
-        self.assertIsNone(msa._positional_metadata)
-
 
 class TestIsSequenceAxis(unittest.TestCase):
     def setUp(self):
         self.msa = TabularMSA([])
 
     def test_invalid_str(self):
-        with six.assertRaisesRegex(self, ValueError, "axis.*'foo'"):
+        with self.assertRaisesRegex(ValueError, "axis.*'foo'"):
             self.msa._is_sequence_axis('foo')
 
     def test_invalid_int(self):
-        with six.assertRaisesRegex(self, ValueError, "axis.*2"):
+        with self.assertRaisesRegex(ValueError, "axis.*2"):
             self.msa._is_sequence_axis(2)
 
     def test_positive_str(self):
@@ -3405,13 +3667,22 @@ class TestIsSequenceAxis(unittest.TestCase):
         self.assertFalse(self.msa._is_sequence_axis(1))
 
 
+class TestHashable(unittest.TestCase):
+    def test_unhashable_type(self):
+        self.assertNotIsInstance(TabularMSA([]), collections.Hashable)
+
+    def test_unhashable_object(self):
+        with self.assertRaisesRegex(TypeError, 'unhashable'):
+            hash(TabularMSA([]))
+
+
 class TestRepr(unittest.TestCase):
     def test_repr(self):
         # basic sanity checks -- more extensive testing of formatting and
         # special cases is performed in TabularMSAReprDoctests below. here we
         # only test that pieces of the repr are present. these tests also
-        # exercise coverage for py2/3 since the doctests in
-        # TabularMSAReprDoctests only currently run in py3.
+        # exercise coverage in case doctests stop counting towards coverage in
+        # the future
 
         # str calls repr
         self.assertEqual(repr(TabularMSA([])), str(TabularMSA([])))
@@ -3471,9 +3742,9 @@ class TestRepr(unittest.TestCase):
 #
 # these doctests exercise the correct formatting of TabularMSA's repr in a
 # variety of situations. they are more extensive than the unit tests above
-# (TestRepr.test_repr) but are only currently run in py3. thus, they cannot
-# be relied upon for coverage (the unit tests take care of this)
-class TabularMSAReprDoctests(object):
+# (TestRepr.test_repr) but cannot be relied upon for coverage (the unit tests
+# take care of this)
+class TabularMSAReprDoctests:
     r"""
     >>> from skbio import DNA, TabularMSA
 
diff --git a/skbio/diversity/__init__.py b/skbio/diversity/__init__.py
index 0f6b613..52b3e4a 100644
--- a/skbio/diversity/__init__.py
+++ b/skbio/diversity/__init__.py
@@ -162,6 +162,7 @@ Functions
 
     alpha_diversity
     beta_diversity
+    partial_beta_diversity
     get_alpha_diversity_metrics
     get_beta_diversity_metrics
 
@@ -377,10 +378,10 @@ Create a matrix containing 6 samples (rows) and 7 OTUs (columns):
    >>> plt.close('all') # not necessary for normal use
    >>> fig = sample_md.boxplot(column='Faith PD', by='body_site')
 
-We can also compute Spearman correlations between all pairs of columns in this
-``DataFrame``. Since our alpha diversity metrics are the only two numeric
-columns (and thus the only columns for which Spearman correlation is relevant),
-this will give us a symmetric 2x2 correlation matrix.
+We can also compute Spearman correlations between all pairs of columns in
+this ``DataFrame``. Since our alpha diversity metrics are the only two
+numeric columns (and thus the only columns for which Spearman correlation
+is relevant), this will give us a symmetric 2x2 correlation matrix.
 
 >>> sample_md.corr(method="spearman")
                Observed OTUs  Faith PD
@@ -397,14 +398,12 @@ Faith PD            0.939336  1.000000
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
-from ._driver import (alpha_diversity, beta_diversity,
+from ._driver import (alpha_diversity, beta_diversity, partial_beta_diversity,
                       get_alpha_diversity_metrics, get_beta_diversity_metrics)
 
 __all__ = ["alpha_diversity", "beta_diversity", "get_alpha_diversity_metrics",
-           "get_beta_diversity_metrics"]
+           "get_beta_diversity_metrics", "partial_beta_diversity"]
 
 test = TestRunner(__file__).test
diff --git a/skbio/diversity/_driver.py b/skbio/diversity/_driver.py
index 61aa403..9821485 100644
--- a/skbio/diversity/_driver.py
+++ b/skbio/diversity/_driver.py
@@ -6,10 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import functools
+import itertools
 
+import numpy as np
 import scipy.spatial.distance
 import pandas as pd
 
@@ -18,7 +18,7 @@ from skbio.diversity.alpha._faith_pd import _faith_pd, _setup_faith_pd
 from skbio.diversity.beta._unifrac import (
     _setup_multiple_unweighted_unifrac, _setup_multiple_weighted_unifrac,
     _normalize_weighted_unifrac_by_default)
-from skbio.util._decorator import experimental
+from skbio.util._decorator import experimental, deprecated
 from skbio.stats.distance import DistanceMatrix
 from skbio.diversity._util import (_validate_counts_matrix,
                                    _get_phylogenetic_kwargs)
@@ -182,6 +182,103 @@ def alpha_diversity(metric, counts, ids=None, validate=True, **kwargs):
     return pd.Series(results, index=ids)
 
 
+ at deprecated(as_of='0.5.0', until='0.5.1',
+            reason=('The return type is unstable. Developer caution is '
+                    'advised. The resulting DistanceMatrix object will '
+                    'include zeros when distance has not been calculated, and '
+                    'therefore can be misleading.'))
+def partial_beta_diversity(metric, counts, ids, id_pairs, validate=True,
+                           **kwargs):
+    """Compute distances only between specified ID pairs
+
+    Parameters
+    ----------
+    metric : str or callable
+        The pairwise distance function to apply. If ``metric`` is a string, it
+        must be resolvable by scikit-bio (e.g., UniFrac methods), or must be
+        callable.
+    counts : 2D array_like of ints or floats
+        Matrix containing count/abundance data where each row contains counts
+        of OTUs in a given sample.
+    ids : iterable of strs
+        Identifiers for each sample in ``counts``.
+    id_pairs : iterable of tuple
+        An iterable of tuples of IDs to compare (e.g., ``[('a', 'b'), ('a',
+        'c'), ...])``. If specified, the set of IDs described must be a subset
+        of ``ids``.
+    validate : bool, optional
+        See ``skbio.diversity.beta_diversity`` for details.
+    kwargs : kwargs, optional
+        Metric-specific parameters.
+
+    Returns
+    -------
+    skbio.DistanceMatrix
+        Distances between pairs of samples indicated by id_pairs. Pairwise
+        distances not defined by id_pairs will be 0.0. Use this resulting
+        DistanceMatrix with caution as 0.0 is a valid distance.
+
+    Raises
+    ------
+    ValueError
+        If ``ids`` are not specified.
+        If ``id_pairs`` are not a subset of ``ids``.
+        If ``metric`` is not a callable or is unresolvable string by
+        scikit-bio.
+        If duplicates are observed in ``id_pairs``.
+
+    See Also
+    --------
+    skbio.diversity.beta_diversity
+    skbio.diversity.get_beta_diversity_metrics
+
+    """
+    if validate:
+        counts = _validate_counts_matrix(counts, ids=ids)
+
+    id_pairs = list(id_pairs)
+    all_ids_in_pairs = set(itertools.chain.from_iterable(id_pairs))
+    if not all_ids_in_pairs.issubset(ids):
+        raise ValueError("`id_pairs` are not a subset of `ids`")
+
+    hashes = {i for i in id_pairs}.union({i[::-1] for i in id_pairs})
+    if len(hashes) != len(id_pairs) * 2:
+        raise ValueError("A duplicate or a self-self pair was observed.")
+
+    if metric == 'unweighted_unifrac':
+        otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
+        metric, counts_by_node = _setup_multiple_unweighted_unifrac(
+                counts, otu_ids=otu_ids, tree=tree, validate=validate)
+        counts = counts_by_node
+    elif metric == 'weighted_unifrac':
+        # get the value for normalized. if it was not provided, it will fall
+        # back to the default value inside of _weighted_unifrac_pdist_f
+        normalized = kwargs.pop('normalized',
+                                _normalize_weighted_unifrac_by_default)
+        otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
+        metric, counts_by_node = _setup_multiple_weighted_unifrac(
+                counts, otu_ids=otu_ids, tree=tree, normalized=normalized,
+                validate=validate)
+        counts = counts_by_node
+    elif callable(metric):
+        metric = functools.partial(metric, **kwargs)
+        # remove all values from kwargs, since they have already been provided
+        # through the partial
+        kwargs = {}
+    else:
+        raise ValueError("partial_beta_diversity is only compatible with "
+                         "optimized unifrac methods and callable functions.")
+
+    dm = np.zeros((len(ids), len(ids)), dtype=float)
+    id_index = {id_: idx for idx, id_ in enumerate(ids)}
+    id_pairs_indexed = ((id_index[u], id_index[v]) for u, v in id_pairs)
+
+    for u, v in id_pairs_indexed:
+        dm[u, v] = metric(counts[u], counts[v], **kwargs)
+
+    return DistanceMatrix(dm + dm.T, ids)
+
+
 @experimental(as_of="0.4.0")
 def beta_diversity(metric, counts, ids=None, validate=True, pairwise_func=None,
                    **kwargs):
@@ -245,9 +342,6 @@ def beta_diversity(metric, counts, ids=None, validate=True, pairwise_func=None,
     if validate:
         counts = _validate_counts_matrix(counts, ids=ids)
 
-    if pairwise_func is None:
-        pairwise_func = scipy.spatial.distance.pdist
-
     if metric == 'unweighted_unifrac':
         otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
         metric, counts_by_node = _setup_multiple_unweighted_unifrac(
@@ -273,5 +367,8 @@ def beta_diversity(metric, counts, ids=None, validate=True, pairwise_func=None,
         # example one of the SciPy metrics
         pass
 
+    if pairwise_func is None:
+        pairwise_func = scipy.spatial.distance.pdist
+
     distances = pairwise_func(counts, metric=metric, **kwargs)
     return DistanceMatrix(distances, ids)
diff --git a/skbio/diversity/_util.py b/skbio/diversity/_util.py
index e54c2a8..d3705ee 100644
--- a/skbio/diversity/_util.py
+++ b/skbio/diversity/_util.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import collections
 
 import numpy as np
@@ -35,7 +33,7 @@ def _validate_counts_vector(counts, suppress_cast=False):
     return counts
 
 
-def _validate_counts_matrix(counts, ids=None, **kwargs):
+def _validate_counts_matrix(counts, ids=None, suppress_cast=False):
     results = []
 
     # handle case of where counts is a single vector by making it a matrix.
@@ -54,11 +52,6 @@ def _validate_counts_matrix(counts, ids=None, **kwargs):
             "Number of rows in ``counts`` must be equal to number of provided "
             "``ids``.")
 
-    # py2-compatible mechanism for specifying a keyword argument when also
-    # passing *args derived from SO answer:
-    # http://stackoverflow.com/a/15302038/3424666
-    suppress_cast = kwargs.pop('suppress_cast', False)
-
     lens = []
     for v in counts:
         results.append(_validate_counts_vector(v, suppress_cast))
@@ -70,7 +63,6 @@ def _validate_counts_matrix(counts, ids=None, **kwargs):
 
 
 def _validate_otu_ids_and_tree(counts, otu_ids, tree):
-
     len_otu_ids = len(otu_ids)
     set_otu_ids = set(otu_ids)
     if len_otu_ids != len(set_otu_ids):
diff --git a/skbio/diversity/alpha/__init__.py b/skbio/diversity/alpha/__init__.py
index d37a936..6453bd9 100644
--- a/skbio/diversity/alpha/__init__.py
+++ b/skbio/diversity/alpha/__init__.py
@@ -56,8 +56,6 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._ace import ace
diff --git a/skbio/diversity/alpha/_ace.py b/skbio/diversity/alpha/_ace.py
index 8184791..0106525 100644
--- a/skbio/diversity/alpha/_ace.py
+++ b/skbio/diversity/alpha/_ace.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from skbio.diversity._util import _validate_counts_vector
diff --git a/skbio/diversity/alpha/_base.py b/skbio/diversity/alpha/_base.py
index 324df3c..d7a3d02 100644
--- a/skbio/diversity/alpha/_base.py
+++ b/skbio/diversity/alpha/_base.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 from scipy.special import gammaln
 from scipy.optimize import fmin_powell, minimize_scalar
diff --git a/skbio/diversity/alpha/_chao1.py b/skbio/diversity/alpha/_chao1.py
index f85b520..0df695e 100644
--- a/skbio/diversity/alpha/_chao1.py
+++ b/skbio/diversity/alpha/_chao1.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from ._base import osd
diff --git a/skbio/diversity/alpha/_faith_pd.py b/skbio/diversity/alpha/_faith_pd.py
index f868600..46049e6 100644
--- a/skbio/diversity/alpha/_faith_pd.py
+++ b/skbio/diversity/alpha/_faith_pd.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util._decorator import experimental
 from skbio.diversity._util import (_validate_counts_vector,
                                    _validate_otu_ids_and_tree,
@@ -109,9 +107,9 @@ def faith_pd(counts, otu_ids, tree, validate=True):
     >>> from io import StringIO
     >>> from skbio import TreeNode
     >>> tree = TreeNode.read(StringIO(
-    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
-    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
-    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+    ...                      '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
 
     We can then compute the Faith PD of the sample.
 
diff --git a/skbio/diversity/alpha/_gini.py b/skbio/diversity/alpha/_gini.py
index 6a39edc..4a9f569 100644
--- a/skbio/diversity/alpha/_gini.py
+++ b/skbio/diversity/alpha/_gini.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from skbio.diversity._util import _validate_counts_vector
diff --git a/skbio/diversity/alpha/_lladser.py b/skbio/diversity/alpha/_lladser.py
index 7c35c9c..808c75c 100644
--- a/skbio/diversity/alpha/_lladser.py
+++ b/skbio/diversity/alpha/_lladser.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from skbio.diversity._util import _validate_counts_vector
diff --git a/skbio/diversity/alpha/tests/__init__.py b/skbio/diversity/alpha/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/diversity/alpha/tests/__init__.py
+++ b/skbio/diversity/alpha/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/diversity/alpha/tests/test_ace.py b/skbio/diversity/alpha/tests/test_ace.py
index 6201cea..666eb08 100644
--- a/skbio/diversity/alpha/tests/test_ace.py
+++ b/skbio/diversity/alpha/tests/test_ace.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 from nose.tools import assert_almost_equal, assert_raises
 
diff --git a/skbio/diversity/alpha/tests/test_base.py b/skbio/diversity/alpha/tests/test_base.py
index 582eafa..5538236 100644
--- a/skbio/diversity/alpha/tests/test_base.py
+++ b/skbio/diversity/alpha/tests/test_base.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 from io import StringIO
 
@@ -29,12 +27,12 @@ class BaseTests(TestCase):
         self.sids1 = list('ABCD')
         self.oids1 = ['OTU%d' % i for i in range(1, 6)]
         self.t1 = TreeNode.read(StringIO(
-            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
-            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+            '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            '0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
         self.t1_w_extra_tips = TreeNode.read(
-           StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                    u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
-                    u')root;'))
+           StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                    '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                    ')root;'))
 
     def test_berger_parker_d(self):
         self.assertEqual(berger_parker_d(np.array([5])), 1)
diff --git a/skbio/diversity/alpha/tests/test_chao1.py b/skbio/diversity/alpha/tests/test_chao1.py
index c0f8ce7..41e17ea 100644
--- a/skbio/diversity/alpha/tests/test_chao1.py
+++ b/skbio/diversity/alpha/tests/test_chao1.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/diversity/alpha/tests/test_faith_pd.py b/skbio/diversity/alpha/tests/test_faith_pd.py
index 485428e..6f36bf8 100644
--- a/skbio/diversity/alpha/tests/test_faith_pd.py
+++ b/skbio/diversity/alpha/tests/test_faith_pd.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 from io import StringIO
 import os
@@ -32,12 +30,12 @@ class FaithPDTests(TestCase):
         self.sids1 = list('ABCD')
         self.oids1 = ['OTU%d' % i for i in range(1, 6)]
         self.t1 = TreeNode.read(StringIO(
-            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
-            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+            '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            '0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
         self.t1_w_extra_tips = TreeNode.read(
-           StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                    u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
-                    u')root;'))
+           StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                    '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                    ')root;'))
 
     def test_faith_pd_none_observed(self):
         actual = faith_pd(np.array([], dtype=int),
@@ -94,7 +92,7 @@ class FaithPDTests(TestCase):
 
     def test_faith_pd_minimal(self):
         # two tips
-        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
         actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
         expected = 0.25
         self.assertEqual(actual, expected)
@@ -123,8 +121,8 @@ class FaithPDTests(TestCase):
     def test_faith_pd_root_not_observed(self):
         # expected values computed by hand
         tree = TreeNode.read(
-            StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
-                     u'root;'))
+            StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
+                     'root;'))
         otu_ids = ['OTU%d' % i for i in range(1, 5)]
         # root node not observed, but branch between (OTU1, OTU2) and root
         # is considered observed
@@ -141,32 +139,32 @@ class FaithPDTests(TestCase):
     def test_faith_pd_invalid_input(self):
         # tree has duplicated tip ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids,
                           t)
 
         # unrooted tree as input
-        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
-                                   u'OTU4:0.7);'))
+        t = TreeNode.read(StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   'OTU4:0.7);'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
 
         # otu_ids has duplicated ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU2']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
 
         # len of vectors not equal
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
@@ -176,31 +174,31 @@ class FaithPDTests(TestCase):
 
         # negative counts
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, -3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
 
         # tree with no branch lengths
         t = TreeNode.read(
-            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+            StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
 
         # tree missing some branch lengths
         t = TreeNode.read(
-            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
 
         # otu_ids not present in tree
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU42']
         self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
diff --git a/skbio/diversity/alpha/tests/test_gini.py b/skbio/diversity/alpha/tests/test_gini.py
index 2fa0491..195b889 100644
--- a/skbio/diversity/alpha/tests/test_gini.py
+++ b/skbio/diversity/alpha/tests/test_gini.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/diversity/alpha/tests/test_lladser.py b/skbio/diversity/alpha/tests/test_lladser.py
index 9ea8b13..959d7ae 100644
--- a/skbio/diversity/alpha/tests/test_lladser.py
+++ b/skbio/diversity/alpha/tests/test_lladser.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import numpy.testing as npt
 from nose.tools import (assert_equal, assert_almost_equal, assert_raises,
diff --git a/skbio/diversity/beta/__init__.py b/skbio/diversity/beta/__init__.py
index 9fb38f4..be369d8 100644
--- a/skbio/diversity/beta/__init__.py
+++ b/skbio/diversity/beta/__init__.py
@@ -28,8 +28,6 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._unifrac import unweighted_unifrac, weighted_unifrac
diff --git a/skbio/diversity/beta/_unifrac.py b/skbio/diversity/beta/_unifrac.py
index 852fddf..88b9daf 100644
--- a/skbio/diversity/beta/_unifrac.py
+++ b/skbio/diversity/beta/_unifrac.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import functools
 
 import numpy as np
@@ -69,8 +67,8 @@ def unweighted_unifrac(u_counts, v_counts, otu_ids, tree, validate=True):
     Notes
     -----
     Unweighted UniFrac was originally described in [1]_. A discussion of
-    unweighted (qualitative) versus weighted (quantitiative) diversity metrics
-    is presented in [2]_. Deeper mathemtical discussions of this metric is
+    unweighted (qualitative) versus weighted (quantitative) diversity metrics
+    is presented in [2]_. Deeper mathematical discussions of this metric is
     presented in [3]_.
 
     If computing unweighted UniFrac for multiple pairs of samples, using
@@ -132,9 +130,9 @@ def unweighted_unifrac(u_counts, v_counts, otu_ids, tree, validate=True):
     >>> from io import StringIO
     >>> from skbio import TreeNode
     >>> tree = TreeNode.read(StringIO(
-    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
-    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
-    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+    ...                      '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
 
     We can then compute the unweighted UniFrac distance between the samples.
 
@@ -258,9 +256,9 @@ def weighted_unifrac(u_counts, v_counts, otu_ids, tree,
     >>> from io import StringIO
     >>> from skbio import TreeNode
     >>> tree = TreeNode.read(StringIO(
-    ...                      u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
-    ...                      u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
-    ...                      u',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
+    ...                      '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
+    ...                      '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
+    ...                      ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
 
     Compute the weighted UniFrac distance between the samples.
 
@@ -331,7 +329,7 @@ def _unweighted_unifrac(u_node_counts, v_node_counts, branch_lengths):
     Parameters
     ----------
     u_node_counts, v_node_counts : np.array
-        Vectors indicating presense (value greater than zero) and absense
+        Vectors indicating presence (value greater than zero) and absence
         (value equal to zero) of nodes in two samples, `u` and `v`. Order is
         assumed to be the same as in `branch_lengths`.
     branch_lengths : np.array
@@ -365,7 +363,7 @@ def _weighted_unifrac(u_node_counts, v_node_counts, u_total_count,
     Parameters
     ----------
     u_node_counts, v_node_counts : np.array
-        Vectors indicating presense (value greater than zero) and absense
+        Vectors indicating presence (value greater than zero) and absence
         (value equal to zero) of nodes in two samples, `u` and `v`. Order is
         assumed to be the same as in `branch_lengths`.
     u_total_count, v_total_counts : int
@@ -412,7 +410,7 @@ def _weighted_unifrac_normalized(u_node_counts, v_node_counts, u_total_count,
     Parameters
     ----------
     u_node_counts, v_node_counts : np.array
-         Vectors indicating presense (value greater than zero) and absense
+         Vectors indicating presence (value greater than zero) and absence
          (value equal to zero) of nodes in two samples, `u` and `v`. Order is
          assumed to be the same as in `branch_lengths`.
     u_total_count, v_total_counts : int
diff --git a/skbio/diversity/beta/tests/__init__.py b/skbio/diversity/beta/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/diversity/beta/tests/__init__.py
+++ b/skbio/diversity/beta/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/diversity/beta/tests/test_unifrac.py b/skbio/diversity/beta/tests/test_unifrac.py
index b762429..b98d964 100644
--- a/skbio/diversity/beta/tests/test_unifrac.py
+++ b/skbio/diversity/beta/tests/test_unifrac.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from io import StringIO
 from unittest import main, TestCase
 
@@ -34,16 +32,16 @@ class UnifracTests(TestCase):
         self.sids1 = list('ABCDEF')
         self.oids1 = ['OTU%d' % i for i in range(1, 6)]
         self.t1 = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         self.t1_w_extra_tips = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
-                     u')root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
+                     ')root;'))
 
         self.t2 = TreeNode.read(
-            StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
-                     u'root;'))
+            StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
+                     'root;'))
         self.oids2 = ['OTU%d' % i for i in range(1, 5)]
 
     def test_unweighted_otus_out_of_order(self):
@@ -102,7 +100,7 @@ class UnifracTests(TestCase):
 
     def test_unweighted_minimal_trees(self):
         # two tips
-        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
         actual = unweighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'],
                                     tree)
         expected = 1.0
@@ -110,7 +108,7 @@ class UnifracTests(TestCase):
 
     def test_weighted_minimal_trees(self):
         # two tips
-        tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
+        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
         actual = weighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree)
         expected = 0.25
         self.assertEqual(actual, expected)
@@ -194,8 +192,8 @@ class UnifracTests(TestCase):
 
         # tree has duplicated tip ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU2:0.75):1.25):0.0)root;'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -205,8 +203,8 @@ class UnifracTests(TestCase):
                           v_counts, otu_ids, t)
 
         # unrooted tree as input
-        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
-                                   u'OTU4:0.7);'))
+        t = TreeNode.read(StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                   'OTU4:0.7);'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -217,8 +215,8 @@ class UnifracTests(TestCase):
 
         # otu_ids has duplicated ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU2']
@@ -229,8 +227,8 @@ class UnifracTests(TestCase):
 
         # len of vectors not equal
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         u_counts = [1, 2]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -255,8 +253,8 @@ class UnifracTests(TestCase):
 
         # negative counts
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         u_counts = [1, 2, -3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -274,7 +272,7 @@ class UnifracTests(TestCase):
 
         # tree with no branch lengths
         t = TreeNode.read(
-            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+            StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -285,8 +283,8 @@ class UnifracTests(TestCase):
 
         # tree missing some branch lengths
         t = TreeNode.read(
-            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
@@ -297,8 +295,8 @@ class UnifracTests(TestCase):
 
         # otu_ids not present in tree
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                     '0.75,OTU5:0.75):1.25):0.0)root;'))
         u_counts = [1, 2, 3]
         v_counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU42']
diff --git a/skbio/diversity/tests/__init__.py b/skbio/diversity/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/diversity/tests/__init__.py
+++ b/skbio/diversity/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/diversity/tests/test_driver.py b/skbio/diversity/tests/test_driver.py
index 15bf08f..766675b 100644
--- a/skbio/diversity/tests/test_driver.py
+++ b/skbio/diversity/tests/test_driver.py
@@ -6,19 +6,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import io
 from unittest import TestCase, main
 
 import pandas as pd
 import numpy as np
 import numpy.testing as npt
-import six
 
 from skbio import DistanceMatrix, TreeNode
-from skbio.io._fileobject import StringIO
 from skbio.util._testing import assert_series_almost_equal
 from skbio.diversity import (alpha_diversity, beta_diversity,
+                             partial_beta_diversity,
                              get_alpha_diversity_metrics,
                              get_beta_diversity_metrics)
 from skbio.diversity.alpha import faith_pd, observed_otus
@@ -34,18 +32,18 @@ class AlphaDiversityTests(TestCase):
                                 [0, 0, 1, 1, 1]])
         self.sids1 = list('ABCD')
         self.oids1 = ['OTU%d' % i for i in range(1, 6)]
-        self.tree1 = TreeNode.read(StringIO(
-            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
-            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
+        self.tree1 = TreeNode.read(io.StringIO(
+            '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            '0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
 
         self.table2 = np.array([[1, 3],
                                 [0, 2],
                                 [0, 0]])
         self.sids2 = list('xyz')
         self.oids2 = ['OTU1', 'OTU5']
-        self.tree2 = TreeNode.read(StringIO(
-            u'(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
-            u'0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))
+        self.tree2 = TreeNode.read(io.StringIO(
+            '(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
+            '0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))
 
     def test_invalid_input(self):
         # number of ids doesn't match the number of samples
@@ -84,16 +82,17 @@ class AlphaDiversityTests(TestCase):
 
         # tree has duplicated tip ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(DuplicateNodeError, alpha_diversity, 'faith_pd',
                           counts, otu_ids=otu_ids, tree=t)
 
         # unrooted tree as input
-        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
-                                   u'OTU4:0.7);'))
+        t = TreeNode.read(io.StringIO(
+            '((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,OTU4:0.7);'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
@@ -101,8 +100,9 @@ class AlphaDiversityTests(TestCase):
 
         # otu_ids has duplicated ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU2']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
@@ -110,15 +110,17 @@ class AlphaDiversityTests(TestCase):
 
         # count and OTU vectors are not equal length
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                           counts, otu_ids=otu_ids, tree=t)
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
@@ -126,7 +128,7 @@ class AlphaDiversityTests(TestCase):
 
         # tree with no branch lengths
         t = TreeNode.read(
-            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+            io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
@@ -134,8 +136,8 @@ class AlphaDiversityTests(TestCase):
 
         # tree missing some branch lengths
         t = TreeNode.read(
-            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                        '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
@@ -143,8 +145,9 @@ class AlphaDiversityTests(TestCase):
 
         # some otu_ids not present in tree
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU42']
         self.assertRaises(MissingNodeError, alpha_diversity, 'faith_pd',
@@ -266,7 +269,7 @@ class BetaDiversityTests(TestCase):
                        [2, 3],
                        [0, 1]]
         self.sids1 = list('ABC')
-        self.tree1 = TreeNode.read(StringIO(
+        self.tree1 = TreeNode.read(io.StringIO(
             '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
         self.oids1 = ['O1', 'O2']
 
@@ -281,40 +284,40 @@ class BetaDiversityTests(TestCase):
     def test_invalid_input(self):
         # number of ids doesn't match the number of samples
         error_msg = ("Number of rows")
-        with six.assertRaisesRegex(self, ValueError, error_msg):
+        with self.assertRaisesRegex(ValueError, error_msg):
             beta_diversity(self.table1, list('AB'), 'euclidean')
 
         # unknown metric provided
         error_msg = "not-a-metric"
-        with six.assertRaisesRegex(self, ValueError, error_msg):
+        with self.assertRaisesRegex(ValueError, error_msg):
             beta_diversity('not-a-metric', self.table1)
 
         # 3-D list provided as input
         error_msg = ("Only 1-D and 2-D")
-        with six.assertRaisesRegex(self, ValueError, error_msg):
+        with self.assertRaisesRegex(ValueError, error_msg):
             beta_diversity('euclidean', [[[43]]])
 
         # negative counts
         error_msg = "negative values."
-        with six.assertRaisesRegex(self, ValueError, error_msg):
+        with self.assertRaisesRegex(ValueError, error_msg):
             beta_diversity('euclidean', [[0, 1, 3, 4], [0, 3, -12, 42]])
-        with six.assertRaisesRegex(self, ValueError, error_msg):
+        with self.assertRaisesRegex(ValueError, error_msg):
             beta_diversity('euclidean', [[0, 1, 3, -4], [0, 3, 12, 42]])
 
         # additional kwargs
         error_msg = ("'not_a_real_kwarg'")
-        with six.assertRaisesRegex(self, TypeError, error_msg):
+        with self.assertRaisesRegex(TypeError, error_msg):
             beta_diversity('euclidean', [[0, 1, 3], [0, 3, 12]],
                            not_a_real_kwarg=42.0)
-        with six.assertRaisesRegex(self, TypeError, error_msg):
+        with self.assertRaisesRegex(TypeError, error_msg):
             beta_diversity('unweighted_unifrac', [[0, 1, 3], [0, 3, 12]],
                            not_a_real_kwarg=42.0, tree=self.tree1,
                            otu_ids=['O1', 'O2', 'O3'])
-        with six.assertRaisesRegex(self, TypeError, error_msg):
+        with self.assertRaisesRegex(TypeError, error_msg):
             beta_diversity('weighted_unifrac', [[0, 1, 3], [0, 3, 12]],
                            not_a_real_kwarg=42.0, tree=self.tree1,
                            otu_ids=['O1', 'O2', 'O3'])
-        with six.assertRaisesRegex(self, TypeError, error_msg):
+        with self.assertRaisesRegex(TypeError, error_msg):
             beta_diversity(weighted_unifrac, [[0, 1, 3], [0, 3, 12]],
                            not_a_real_kwarg=42.0, tree=self.tree1,
                            otu_ids=['O1', 'O2', 'O3'])
@@ -333,8 +336,9 @@ class BetaDiversityTests(TestCase):
 
         # tree has duplicated tip ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(DuplicateNodeError, beta_diversity,
@@ -344,8 +348,8 @@ class BetaDiversityTests(TestCase):
                           tree=t)
 
         # unrooted tree as input
-        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
-                                   u'OTU4:0.7);'))
+        t = TreeNode.read(io.StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                      'OTU4:0.7);'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, beta_diversity,
@@ -356,8 +360,9 @@ class BetaDiversityTests(TestCase):
 
         # otu_ids has duplicated ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU2']
         self.assertRaises(ValueError, beta_diversity,
@@ -368,8 +373,9 @@ class BetaDiversityTests(TestCase):
 
         # count and OTU vectors are not equal length
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2']
         self.assertRaises(ValueError, beta_diversity,
@@ -378,8 +384,9 @@ class BetaDiversityTests(TestCase):
                           'unweighted_unifrac', counts, otu_ids=otu_ids,
                           tree=t)
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 2]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, beta_diversity,
@@ -390,7 +397,7 @@ class BetaDiversityTests(TestCase):
 
         # tree with no branch lengths
         t = TreeNode.read(
-            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+            io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, beta_diversity,
@@ -401,8 +408,8 @@ class BetaDiversityTests(TestCase):
 
         # tree missing some branch lengths
         t = TreeNode.read(
-            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                        '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, beta_diversity,
@@ -413,8 +420,9 @@ class BetaDiversityTests(TestCase):
 
         # some otu_ids not present in tree
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU42']
         self.assertRaises(MissingNodeError, beta_diversity,
@@ -628,5 +636,146 @@ class MetricGetters(TestCase):
         n = sorted(list(m))
         self.assertEqual(m, n)
 
+
+class TestPartialBetaDiversity(TestCase):
+    def setUp(self):
+        self.table1 = [[1, 5],
+                       [2, 3],
+                       [0, 1]]
+        self.sids1 = list('ABC')
+        self.tree1 = TreeNode.read(io.StringIO(
+            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
+        self.oids1 = ['O1', 'O2']
+
+        self.table2 = [[23, 64, 14, 0, 0, 3, 1],
+                       [0, 3, 35, 42, 0, 12, 1],
+                       [0, 5, 5, 0, 40, 40, 0],
+                       [44, 35, 9, 0, 1, 0, 0],
+                       [0, 2, 8, 0, 35, 45, 1],
+                       [0, 0, 25, 35, 0, 19, 0]]
+        self.sids2 = list('ABCDEF')
+
+    def test_id_pairs_as_iterable(self):
+        id_pairs = iter([('B', 'C'), ])
+        dm = partial_beta_diversity('unweighted_unifrac', self.table1,
+                                    self.sids1, otu_ids=self.oids1,
+                                    tree=self.tree1, id_pairs=id_pairs)
+        self.assertEqual(dm.shape, (3, 3))
+        expected_data = [[0.0, 0.0, 0.0],
+                         [0.0, 0.0, 0.25],
+                         [0.0, 0.25, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+        # id_pairs was supplied as a single-pass iterator, not a list
+
+    def test_unweighted_unifrac_partial(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        # expected values calculated by hand
+        dm = partial_beta_diversity('unweighted_unifrac', self.table1,
+                                    self.sids1, otu_ids=self.oids1,
+                                    tree=self.tree1, id_pairs=[('B', 'C'), ])
+        self.assertEqual(dm.shape, (3, 3))
+        expected_data = [[0.0, 0.0, 0.0],
+                         [0.0, 0.0, 0.25],
+                         [0.0, 0.25, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_weighted_unifrac_partial_full(self):
+        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
+        # near-equality testing when that support is available
+        # expected values calculated by hand
+        dm1 = partial_beta_diversity('weighted_unifrac', self.table1,
+                                     self.sids1, otu_ids=self.oids1,
+                                     tree=self.tree1, id_pairs=[('A', 'B'),
+                                                                ('A', 'C'),
+                                                                ('B', 'C')])
+        dm2 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
+                             otu_ids=self.oids1, tree=self.tree1)
+
+        self.assertEqual(dm1.shape, (3, 3))
+        self.assertEqual(dm1, dm2)
+        expected_data = [
+            [0.0, 0.1750000, 0.12499999],
+            [0.1750000, 0.0, 0.3000000],
+            [0.12499999, 0.3000000, 0.0]]
+        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
+        for id1 in self.sids1:
+            for id2 in self.sids1:
+                npt.assert_almost_equal(dm1[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_self_self_pair(self):
+        error_msg = ("A duplicate or a self-self pair was observed.")
+        with self.assertRaisesRegex(ValueError, error_msg):
+            partial_beta_diversity((lambda x, y: x + y), self.table1,
+                                   self.sids1, id_pairs=[('A', 'B'),
+                                                         ('A', 'A')])
+
+    def test_duplicate_pairs(self):
+        # confirm that partial pairwise execution fails if duplicate pairs are
+        # observed
+        error_msg = ("A duplicate or a self-self pair was observed.")
+        with self.assertRaisesRegex(ValueError, error_msg):
+            partial_beta_diversity((lambda x, y: x + y), self.table1,
+                                   self.sids1, id_pairs=[('A', 'B'),
+                                                         ('A', 'B')])
+
+    def test_duplicate_transpose_pairs(self):
+        # confirm that partial pairwise execution fails if a transpose
+        # duplicate is observed, i.e. ('B', 'A') after ('A', 'B')
+        error_msg = ("A duplicate or a self-self pair was observed.")
+        with self.assertRaisesRegex(ValueError, error_msg):
+            partial_beta_diversity((lambda x, y: x + y), self.table1,
+                                   self.sids1, id_pairs=[('A', 'B'),
+                                                         ('B', 'A')])
+
+    def test_pairs_not_subset(self):
+        # confirm raise when pairs are not a subset of IDs
+        error_msg = ("`id_pairs` are not a subset of `ids`")
+        with self.assertRaisesRegex(ValueError, error_msg):
+            partial_beta_diversity((lambda x, y: x + y), self.table1,
+                                   self.sids1, id_pairs=[('x', 'b'), ])
+
+    def test_euclidean(self):
+        # confirm that pw execution through partial is identical
+        def euclidean(u, v, **kwargs):
+            return np.sqrt(((u - v)**2).sum())
+
+        id_pairs = [('A', 'B'), ('B', 'F'), ('D', 'E')]
+        actual_dm = partial_beta_diversity(euclidean, self.table2, self.sids2,
+                                           id_pairs=id_pairs)
+        actual_dm = DistanceMatrix(actual_dm, self.sids2)
+
+        expected_data = [
+            [0., 80.8455317, 0., 0., 0., 0.],
+            [80.8455317, 0., 0., 0., 0., 14.422205],
+            [0., 0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 78.7908624, 0.],
+            [0., 0., 0., 78.7908624, 0., 0.],
+            [0., 14.422205, 0., 0., 0., 0.]]
+
+        expected_dm = DistanceMatrix(expected_data, self.sids2)
+        for id1 in self.sids2:
+            for id2 in self.sids2:
+                npt.assert_almost_equal(actual_dm[id1, id2],
+                                        expected_dm[id1, id2], 6)
+
+    def test_unusable_metric(self):
+        id_pairs = [('A', 'B'), ('B', 'F'), ('D', 'E')]
+        error_msg = "partial_beta_diversity is only compatible"
+        with self.assertRaisesRegex(ValueError, error_msg):
+            partial_beta_diversity('hamming', self.table2, self.sids2,
+                                   id_pairs=id_pairs)
+
+
 if __name__ == "__main__":
     main()
diff --git a/skbio/diversity/tests/test_util.py b/skbio/diversity/tests/test_util.py
index e0f88d6..9051660 100644
--- a/skbio/diversity/tests/test_util.py
+++ b/skbio/diversity/tests/test_util.py
@@ -6,14 +6,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+import io
+from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as npt
 
-from unittest import TestCase, main
-
-from skbio.io._fileobject import StringIO
 from skbio import TreeNode
 from skbio.diversity._util import (_validate_counts_vector,
                                    _validate_counts_matrix,
@@ -124,32 +122,36 @@ class ValidationTests(TestCase):
     def test_validate_otu_ids_and_tree(self):
         # basic valid input
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
 
         # all tips observed
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 1, 1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
         self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
 
         # no tips observed
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = []
         otu_ids = []
         self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
 
         # all counts zero
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [0, 0, 0, 0, 0]
         otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
         self.assertTrue(_validate_otu_ids_and_tree(counts, otu_ids, t) is None)
@@ -157,16 +159,17 @@ class ValidationTests(TestCase):
     def test_validate_otu_ids_and_tree_invalid_input(self):
         # tree has duplicated tip ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU2:0.75):1.25):0.0)root;'))
         counts = [1, 1, 1]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(DuplicateNodeError, _validate_otu_ids_and_tree,
                           counts, otu_ids, t)
 
         # unrooted tree as input
-        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
-                                   u'OTU4:0.7);'))
+        t = TreeNode.read(io.StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
+                                      'OTU4:0.7);'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
@@ -174,8 +177,9 @@ class ValidationTests(TestCase):
 
         # otu_ids has duplicated ids
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU2']
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
@@ -183,8 +187,9 @@ class ValidationTests(TestCase):
 
         # len of vectors not equal
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
@@ -196,7 +201,7 @@ class ValidationTests(TestCase):
 
         # tree with no branch lengths
         t = TreeNode.read(
-            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
+            io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
@@ -204,8 +209,9 @@ class ValidationTests(TestCase):
 
         # tree missing some branch lengths
         t = TreeNode.read(
-            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU3']
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
@@ -213,22 +219,23 @@ class ValidationTests(TestCase):
 
         # otu_ids not present in tree
         t = TreeNode.read(
-            StringIO(u'(((((OTU1:0.25,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
-                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
+            io.StringIO(
+                '(((((OTU1:0.25,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
+                '0.75,OTU5:0.75):1.25):0.0)root;'))
         counts = [1, 2, 3]
         otu_ids = ['OTU1', 'OTU2', 'OTU32']
         self.assertRaises(MissingNodeError, _validate_otu_ids_and_tree, counts,
                           otu_ids, t)
 
         # single node tree
-        t = TreeNode.read(StringIO(u'root;'))
+        t = TreeNode.read(io.StringIO('root;'))
         counts = []
         otu_ids = []
         self.assertRaises(ValueError, _validate_otu_ids_and_tree, counts,
                           otu_ids, t)
 
     def test_vectorize_counts_and_tree(self):
-        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root;"))
+        t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root;"))
         counts = np.array([[0, 1], [1, 5], [10, 1]])
         count_array, indexed, branch_lengths = \
             _vectorize_counts_and_tree(counts, np.array(['a', 'b']), t)
diff --git a/skbio/io/__init__.py b/skbio/io/__init__.py
index 495a7c5..069c316 100644
--- a/skbio/io/__init__.py
+++ b/skbio/io/__init__.py
@@ -191,8 +191,6 @@ not know how you want to serialize an object. OO interfaces define a default
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from importlib import import_module
 
 from skbio.util import TestRunner
diff --git a/skbio/io/_exception.py b/skbio/io/_exception.py
index 4cad028..1b5d6e6 100644
--- a/skbio/io/_exception.py
+++ b/skbio/io/_exception.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 class IOSourceError(Exception):
     """Raised when a file source cannot be resolved."""
diff --git a/skbio/io/_fileobject.py b/skbio/io/_fileobject.py
index 80428bd..a74556a 100644
--- a/skbio/io/_fileobject.py
+++ b/skbio/io/_fileobject.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import io
 
 
@@ -20,97 +18,41 @@ def is_binary_file(file):
 # of the time. I am very sorry to the poor soul who has to read beyond.
 
 
-class StringIO(io.StringIO):
-    """Treat Bytes the same as Unicode by decoding ascii, for testing only."""
-    def __init__(self, string=None, **kwargs):
-        if isinstance(string, bytes):
-            string = string.decode()
-        super(StringIO, self).__init__(string, **kwargs)
-
-
-class SaneTextIOWrapper(io.TextIOWrapper):
-    def __init__(self, *args, **kwargs):
-        super(SaneTextIOWrapper, self).__init__(*args, **kwargs)
-        self._should_close_buffer = True
-
+class FlushDestructorMixin:
     def __del__(self):
-        # Accept the inevitability of the buffer being closed by the destructor
-        # because of this line in Python 2.7:
-        # https://github.com/python/cpython/blob/2.7/Modules/_io/iobase.c#L221
-        self._should_close_buffer = False
-        # Actually close for Python 3 because it is an override.
-        # We can't call super because Python 2 doesn't actually
-        # have a `__del__` method for IOBase (hence this
-        # workaround). Close is idempotent so it won't matter
-        # that Python 2 will end up calling this twice
-        self.close()
-
-    def close(self):
-        # We can't stop Python 2.7 from calling close in the deconstructor
-        # so instead we can prevent the buffer from being closed with a flag.
-
-        # Based on:
-        # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L1586
-        if self.buffer is not None and not self.closed:
-            try:
-                self.flush()
-            finally:
-                if self._should_close_buffer:
-                    self.buffer.close()
+        # By default, the destructor calls close(), which flushes and closes
+        # the underlying buffer. Override to only flush.
+        if not self.closed:
+            self.flush()
 
 
-class WrappedBufferedRandom(io.BufferedRandom):
-    def __init__(self, *args, **kwargs):
-        super(WrappedBufferedRandom, self).__init__(*args, **kwargs)
-        self._should_close_raw = True
+class SaneTextIOWrapper(FlushDestructorMixin, io.TextIOWrapper):
+    pass
 
-    def __del__(self):
-        self._should_close_raw = False
-        self.close()
 
-    # Based on:
-    # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
-    def close(self):
-        if self.raw is not None and not self.closed:
-            try:
-                # may raise BlockingIOError or BrokenPipeError etc
-                self.flush()
-            finally:
-                if self._should_close_raw:
-                    self.raw.close()
+class WrappedBufferedRandom(FlushDestructorMixin, io.BufferedRandom):
+    pass
 
 
-class CompressedMixin(object):
+class CompressedMixin(FlushDestructorMixin):
     """Act as a bridge between worlds"""
     def __init__(self, before_file, *args, **kwargs):
         self.streamable = kwargs.pop('streamable', True)
-        self._should_close_raw = True
         self._before_file = before_file
         super(CompressedMixin, self).__init__(*args, **kwargs)
 
-    def __del__(self):
-        self._should_close_raw = False
-        self.close()
-
     @property
     def closed(self):
         return self.raw.closed or self._before_file.closed
 
-    # Based on:
-    # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
     def close(self):
-        if self.raw is not None and not self.closed:
-            try:
-                # may raise BlockingIOError or BrokenPipeError etc
-                self.flush()
-            finally:
-                if self._should_close_raw:
-                    self.raw.close()
-                    # The above will not usually close the before_file
-                    # We want the decompression to be transparent, so we don't
-                    # want users to deal with this edge case. Instead we can
-                    # just close the original now that we are being closed.
-                    self._before_file.close()
+        super(CompressedMixin, self).close()
+
+        # The above will not usually close before_file. We want the
+        # decompression to be transparent, so we don't want users to deal with
+        # this edge case. Instead we can just close the original now that we
+        # are being closed.
+        self._before_file.close()
 
 
 class CompressedBufferedReader(CompressedMixin, io.BufferedReader):
@@ -124,7 +66,7 @@ class CompressedBufferedWriter(CompressedMixin, io.BufferedWriter):
 class IterableStringReaderIO(io.StringIO):
     def __init__(self, iterable, newline):
         self._iterable = iterable
-        super(IterableStringReaderIO, self).__init__(u''.join(iterable),
+        super(IterableStringReaderIO, self).__init__(''.join(iterable),
                                                      newline=newline)
 
 
diff --git a/skbio/io/_iosources.py b/skbio/io/_iosources.py
index 2cdf15e..7cf4e12 100644
--- a/skbio/io/_iosources.py
+++ b/skbio/io/_iosources.py
@@ -6,13 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import io
 import gzip
-import bz2file
-from tempfile import gettempdir
+import bz2
+import tempfile
 import itertools
 
 import requests
@@ -24,6 +21,14 @@ from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
                           WrappedBufferedRandom)
 
 
+# NamedTemporaryFile isn't an actual file class, it is a function which
+# returns _TemporaryFileWrapper around a normal file object. Instead of
+# relying on this implementation, we take whatever the class of the result of
+# NamedTemporaryFile is.
+with tempfile.NamedTemporaryFile() as fh:
+    _WrappedTemporaryFile = type(fh)
+
+
 def get_io_sources():
     return (
         # The order of these source is significant as they will short-circuit
@@ -32,6 +37,7 @@ def get_io_sources():
         BytesIOSource,
         BufferedIOSource,
         TextIOSource,
+        WrappedTemporaryFileSource,
         IterableSource
     )
 
@@ -49,7 +55,7 @@ def get_compression_handler(name):
     return compressors.get(name, False)
 
 
-class IOSource(object):
+class IOSource:
     closeable = True
 
     def __init__(self, file, options):
@@ -79,7 +85,7 @@ class Compressor(IOSource):
 
 class FilePathSource(IOSource):
     def can_read(self):
-        return isinstance(self.file, six.string_types)
+        return isinstance(self.file, str)
 
     def can_write(self):
         return self.can_read()
@@ -94,12 +100,12 @@ class FilePathSource(IOSource):
 class HTTPSource(IOSource):
     def can_read(self):
         return (
-            isinstance(self.file, six.string_types) and
+            isinstance(self.file, str) and
             requests.compat.urlparse(self.file).scheme in {'http', 'https'})
 
     def get_reader(self):
         sess = CacheControl(requests.Session(),
-                            cache=FileCache(gettempdir()))
+                            cache=FileCache(tempfile.gettempdir()))
         req = sess.get(self.file)
 
         # if the response is not 200, an exception will be raised
@@ -160,6 +166,25 @@ class TextIOSource(IOSource):
         return self.file
 
 
+class WrappedTemporaryFileSource(IOSource):
+    closeable = False
+
+    def can_read(self):
+        return (isinstance(self.file, _WrappedTemporaryFile) and
+                self.file.readable())
+
+    def can_write(self):
+        return (isinstance(self.file, _WrappedTemporaryFile) and
+                self.file.writable())
+
+    def get_reader(self):
+        # _TemporaryFileWrapper has a file attribute which is an actual fileobj
+        return self.file.file
+
+    def get_writer(self):
+        return self.file.file
+
+
 class IterableSource(IOSource):
     def can_read(self):
         if hasattr(self.file, '__iter__'):
@@ -168,17 +193,11 @@ class IterableSource(IOSource):
             if head is None:
                 self.repaired = []
                 return True
-            if isinstance(head, six.text_type):
+            if isinstance(head, str):
                 self.repaired = itertools.chain([head], iterator)
                 return True
             else:
                 # We may have mangled a generator at this point, so just abort
-                if six.PY2 and isinstance(head, bytes):
-                    raise IOSourceError(
-                        "Could not open source: %r (mode: %r).\n Prepend a "
-                        r"`u` to the strings (e.g. [u'line1\n', u'line2\n'])" %
-                        (self.file, self.options['mode']))
-
                 raise IOSourceError(
                     "Could not open source: %r (mode: %r)" %
                     (self.file, self.options['mode']))
@@ -219,11 +238,11 @@ class BZ2Compressor(Compressor):
         return self.file.peek(3)[:3] == b'BZh'
 
     def get_reader(self):
-        return bz2file.BZ2File(self.file, mode='rb')
+        return bz2.BZ2File(self.file, mode='rb')
 
     def get_writer(self):
-        return bz2file.BZ2File(self.file, mode='wb',
-                               compresslevel=self.options['compresslevel'])
+        return bz2.BZ2File(self.file, mode='wb',
+                           compresslevel=self.options['compresslevel'])
 
 
 class AutoCompressor(Compressor):
diff --git a/skbio/io/_warning.py b/skbio/io/_warning.py
index c7e44ef..cde3ca6 100644
--- a/skbio/io/_warning.py
+++ b/skbio/io/_warning.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 class FormatIdentificationWarning(Warning):
     """Warn when the sniffer of a format cannot confirm the format."""
diff --git a/skbio/io/format/__init__.py b/skbio/io/format/__init__.py
index f85db28..e9635ed 100644
--- a/skbio/io/format/__init__.py
+++ b/skbio/io/format/__init__.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 test = TestRunner(__file__).test
diff --git a/skbio/io/format/_base.py b/skbio/io/format/_base.py
index 9087be8..ea308a5 100644
--- a/skbio/io/format/_base.py
+++ b/skbio/io/format/_base.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.builtins import range
-
 import re
 import warnings
 
diff --git a/skbio/io/format/_blast.py b/skbio/io/format/_blast.py
index 6247234..a1c4d65 100644
--- a/skbio/io/format/_blast.py
+++ b/skbio/io/format/_blast.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 import functools
 
 import pandas as pd
diff --git a/skbio/io/format/blast6.py b/skbio/io/format/blast6.py
index c06b1a3..21aa7d7 100644
--- a/skbio/io/format/blast6.py
+++ b/skbio/io/format/blast6.py
@@ -193,12 +193,12 @@ be used:
 ...                    default_columns=True)
 >>> df # doctest: +NORMALIZE_WHITESPACE
   qseqid                           sseqid  pident  length  mismatch  gapopen \\
-0   moaC     gi|15800534|ref|NP_286546.1|  100.00     161         0        0
-1   moaC  gi|170768970|ref|ZP_02903423.1|   99.38     161         1        0
+0   moaC     gi|15800534|ref|NP_286546.1|  100.00   161.0       0.0      0.0
+1   moaC  gi|170768970|ref|ZP_02903423.1|   99.38   161.0       1.0      0.0
 <BLANKLINE>
-   qstart  qend  sstart  send         evalue  bitscore
-0       1   161       1   161  3.000000e-114       330
-1       1   161       1   161  9.000000e-114       329
+   qstart   qend  sstart   send         evalue  bitscore
+0     1.0  161.0     1.0  161.0  3.000000e-114     330.0
+1     1.0  161.0     1.0  161.0  9.000000e-114     329.0
 
 Suppose we have a ``blast+6`` file with user-supplied (non-default) columns:
 
@@ -218,9 +218,9 @@ in the file:
 ...                    columns=['qseqid', 'pident', 'mismatch', 'length',
 ...                             'gapopen', 'qend', 'bitscore', 'sstart'])
 >>> df # doctest: +NORMALIZE_WHITESPACE
-  qseqid  pident  mismatch  length  gapopen  qend  bitscore  sstart
-0   moaC  100.00         0     161        0   161       330       1
-1   moaC   99.38         1     161        0   161       329       1
+  qseqid  pident  mismatch  length  gapopen   qend  bitscore  sstart
+0   moaC  100.00       0.0   161.0      0.0  161.0     330.0     1.0
+1   moaC   99.38       1.0   161.0      0.0  161.0     329.0     1.0
 
 References
 ----------
@@ -239,9 +239,6 @@ and-csv.html
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 import pandas as pd
 
 from skbio.io import create_format
diff --git a/skbio/io/format/blast7.py b/skbio/io/format/blast7.py
index 9b108a6..b287515 100644
--- a/skbio/io/format/blast7.py
+++ b/skbio/io/format/blast7.py
@@ -206,11 +206,11 @@ Read the file into a ``pd.DataFrame``:
 
 >>> df = skbio.io.read(fh, into=pd.DataFrame)
 >>> df # doctest: +NORMALIZE_WHITESPACE
-       qacc      sacc        evalue  qstart   qend  sstart   send
-0  AE000111  AE000111  0.000000e+00       1  10596       1  10596
-1  AE000111  AE000174  8.000000e-30    5565   5671    6928   6821
-2  AE000111  AE000171  3.000000e-24    5587   5671    2214   2130
-3  AE000111  AE000425  6.000000e-26    5587   5671    8552   8468
+       qacc      sacc        evalue  qstart     qend  sstart     send
+0  AE000111  AE000111  0.000000e+00     1.0  10596.0     1.0  10596.0
+1  AE000111  AE000174  8.000000e-30  5565.0   5671.0  6928.0   6821.0
+2  AE000111  AE000171  3.000000e-24  5587.0   5671.0  2214.0   2130.0
+3  AE000111  AE000425  6.000000e-26  5587.0   5671.0  8552.0   8468.0
 
 Suppose we have a legacy BLAST 9 file:
 
@@ -238,14 +238,14 @@ Read the file into a ``pd.DataFrame``:
 >>> df = skbio.io.read(fh, into=pd.DataFrame)
 >>> df # doctest: +NORMALIZE_WHITESPACE
      qseqid          sseqid  pident  length  mismatch  gapopen  qstart  qend \\
-0  AF178033  EMORG:AF178033  100.00     811         0        0       1   811
-1  AF178033  EMORG:AF178032   94.57     811        44        0       1   811
-2  AF178033  EMORG:AF178031   94.82     811        42        0       1   811
+0  AF178033  EMORG:AF178033  100.00   811.0       0.0      0.0     1.0  811.0
+1  AF178033  EMORG:AF178032   94.57   811.0      44.0      0.0     1.0  811.0
+2  AF178033  EMORG:AF178031   94.82   811.0      42.0      0.0     1.0  811.0
 <BLANKLINE>
-   sstart  send  evalue  bitscore
-0       1   811       0    1566.6
-1       1   811       0    1217.7
-2       1   811       0    1233.5
+   sstart   send  evalue  bitscore
+0     1.0  811.0     0.0    1566.6
+1     1.0  811.0     0.0    1217.7
+2     1.0  811.0     0.0    1233.5
 
 References
 ==========
@@ -262,9 +262,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 import pandas as pd
 
 from skbio.io import create_format, BLAST7FormatError
diff --git a/skbio/io/format/clustal.py b/skbio/io/format/clustal.py
index aab3e88..7989912 100644
--- a/skbio/io/format/clustal.py
+++ b/skbio/io/format/clustal.py
@@ -144,9 +144,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 from skbio.io import create_format, ClustalFormatError
 from skbio.alignment import TabularMSA
 
diff --git a/skbio/io/format/emptyfile.py b/skbio/io/format/emptyfile.py
index 49692e9..e9c1545 100644
--- a/skbio/io/format/emptyfile.py
+++ b/skbio/io/format/emptyfile.py
@@ -27,9 +27,6 @@ An empty file consists of only whitespace characters.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 from skbio.io import create_format
 
 emptyfile = create_format('<emptyfile>')
diff --git a/skbio/io/format/fasta.py b/skbio/io/format/fasta.py
index 8a54bcf..d0f713a 100644
--- a/skbio/io/format/fasta.py
+++ b/skbio/io/format/fasta.py
@@ -470,7 +470,7 @@ Stats:
     length: 42
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 50.00%
 ------------------------------------------------
 0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
@@ -607,11 +607,7 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.builtins import range, zip
-from six.moves import zip_longest
-
+import itertools
 import textwrap
 
 import numpy as np
@@ -685,8 +681,8 @@ def _fasta_to_generator(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
         qual_gen = _parse_fasta_raw(qual, _parse_quality_scores,
                                     QUALFormatError)
 
-        for fasta_rec, qual_rec in zip_longest(fasta_gen, qual_gen,
-                                               fillvalue=None):
+        for fasta_rec, qual_rec in itertools.zip_longest(fasta_gen, qual_gen,
+                                                         fillvalue=None):
             if fasta_rec is None:
                 raise FASTAFormatError(
                     "QUAL file has more records than FASTA file.")
@@ -906,8 +902,7 @@ def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
                         description_newline_replacement, max_width,
                         lowercase=None):
     def seq_gen():
-        for seq in obj:
-            yield seq
+        yield from obj
 
     _generator_to_fasta(
         seq_gen(), fh, qual=qual,
diff --git a/skbio/io/format/fastq.py b/skbio/io/format/fastq.py
index 79ce877..7040599 100644
--- a/skbio/io/format/fastq.py
+++ b/skbio/io/format/fastq.py
@@ -236,7 +236,7 @@ Stats:
     length: 35
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 14.29%
 ----------------------------------------
 0 TATGTATATA TAACATATAC ATATATACAT ACATA
@@ -283,10 +283,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.builtins import range, zip
-
 import re
 
 import numpy as np
@@ -317,7 +313,12 @@ def _fastq_sniffer(fh):
 
     try:
         not_empty = False
-        for _ in zip(range(10), _fastq_to_generator(fh, phred_offset=33)):
+        for _, seq in zip(range(10), _fastq_to_generator(fh, phred_offset=33)):
+            split_length = len((seq.metadata['id'] +
+                                seq.metadata['description']).split(':'))
+            description = seq.metadata['description'].split(':')
+            if split_length == 10 and description[1] in 'YN':
+                return True, {'variant': 'illumina1.8'}
             not_empty = True
         return not_empty, {}
     except (FASTQFormatError, ValueError):
@@ -532,8 +533,7 @@ def _sequences_to_fastq(obj, fh, variant, phred_offset,
                         id_whitespace_replacement,
                         description_newline_replacement, lowercase=None):
     def seq_gen():
-        for seq in obj:
-            yield seq
+        yield from obj
 
     _generator_to_fastq(
         seq_gen(), fh, variant=variant, phred_offset=phred_offset,
diff --git a/skbio/io/format/genbank.py b/skbio/io/format/genbank.py
index a6d931c..11f77c6 100644
--- a/skbio/io/format/genbank.py
+++ b/skbio/io/format/genbank.py
@@ -179,7 +179,7 @@ Stats:
     length: 34
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 35.29%
 -----------------------------------------------------------------
 0 AGAGGTTCTA GCACATCCCT CTATAAAAAA CTAA
@@ -209,7 +209,7 @@ Stats:
     length: 34
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 35.29%
 -----------------------------------------------------------------
 0 AGAGGUUCUA GCACAUCCCU CUAUAAAAAA CUAA
@@ -258,10 +258,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.builtins import range, zip
-
 import re
 import numpy as np
 import pandas as pd
diff --git a/skbio/io/format/lsmat.py b/skbio/io/format/lsmat.py
index fa48192..c65d4a2 100644
--- a/skbio/io/format/lsmat.py
+++ b/skbio/io/format/lsmat.py
@@ -70,9 +70,6 @@ or writing to a file.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 import csv
 
 import numpy as np
diff --git a/skbio/io/format/newick.py b/skbio/io/format/newick.py
index 3796c87..5f02dc8 100644
--- a/skbio/io/format/newick.py
+++ b/skbio/io/format/newick.py
@@ -220,11 +220,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-from future.builtins import zip, range
-
 from skbio.io import create_format, NewickFormatError
 from skbio.tree import TreeNode
 
diff --git a/skbio/io/format/ordination.py b/skbio/io/format/ordination.py
index 465be22..e3188b3 100644
--- a/skbio/io/format/ordination.py
+++ b/skbio/io/format/ordination.py
@@ -186,14 +186,10 @@ Load the ordination results from the file:
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.builtins import zip
-
 import numpy as np
 import pandas as pd
 
-from skbio._base import OrdinationResults
+from skbio.stats.ordination import OrdinationResults
 from skbio.io import create_format, OrdinationFormatError
 
 ordination = create_format('ordination')
diff --git a/skbio/io/format/phylip.py b/skbio/io/format/phylip.py
index 6ee7b15..fd3519e 100644
--- a/skbio/io/format/phylip.py
+++ b/skbio/io/format/phylip.py
@@ -191,7 +191,7 @@ default integer index labels:
 
 >>> msa.reassign_index()
 >>> msa.index
-Int64Index([0, 1, 2], dtype='int64')
+RangeIndex(start=0, stop=3, step=1)
 
 We can now write the ``TabularMSA`` in PHYLIP format:
 
@@ -223,9 +223,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
 from skbio.alignment import TabularMSA
 from skbio.io import create_format, PhylipFormatError
 from skbio.util._misc import chunk_str
diff --git a/skbio/io/format/qseq.py b/skbio/io/format/qseq.py
index 6604bef..ee503c4 100644
--- a/skbio/io/format/qseq.py
+++ b/skbio/io/format/qseq.py
@@ -158,11 +158,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-from future.builtins import zip, range
-
 from skbio.io import create_format, QSeqFormatError
 from skbio.io.format._base import _decode_qual_to_phred, _get_nth_sequence
 from skbio.sequence import Sequence, DNA, RNA, Protein
diff --git a/skbio/io/format/stockholm.py b/skbio/io/format/stockholm.py
index b4f2cb3..9531756 100644
--- a/skbio/io/format/stockholm.py
+++ b/skbio/io/format/stockholm.py
@@ -117,12 +117,16 @@ Where ``DE`` is the feature name and ``CBS Domain`` is the feature data.
 GF metadata is stored in the ``TabularMSA`` ``metadata`` dictionary.
 
 .. note:: When reading, duplicate GF feature names will have their values
-   concatenated in the order they appear in the file. When writing, each GF
-   feature will be placed on its own line, regardless of length.
+   concatenated in the order they appear in the file. Concatenation will
+   also add a space between lines if one isn't already there in order to avoid
+   joining words together. When writing, each GF feature will be placed on its
+   own line, regardless of length.
 
 .. note:: Trees labelled with ``NH``/``TN`` are handled differently than other
    GF features. When reading a Stockholm file with these features, the reader
-   follows the rules described in [2]_.
+   follows the rules described in [2]_. Trees split over multiple lines will
+   have their values concatenated. Unlike other GF features, trees will never
+   have a space added when they are concatenated.
 
    A single tree without an identifier will be stored as::
 
@@ -147,6 +151,39 @@ GF metadata is stored in the ``TabularMSA`` ``metadata`` dictionary.
            }
        }
 
+.. note:: References labelled with ``RN``/``RM``/``RT``/``RA``/``RL``/``RC``
+   are handled differently than other GF features. When reading a Stockholm
+   file with these features, the reader populates a list of dictionaries,
+   where each dictionary represents a single reference. The list contains
+   references in the order they appear in the file, regardless of the value
+   provided for ``RN``. If a reference does not include all possible reference
+   tags (e.g. ``RC`` is missing), the dictionary will only contain the
+   reference tags present for that reference. When writing, the writer adds a
+   reference number (``RN``) line before writing each reference, for example:
+
+   .. code-block:: none
+
+      #=GF RN [1]
+      #=GF RA Kestrel Gorlick
+      ...
+      #=GF RN [2]
+      ...
+
+   References will be stored as::
+
+       metadata = {
+           'RN': [{
+               'RM': 'reference medline',
+               'RT': 'reference title',
+               'RA': 'reference author',
+               'RL': 'reference location',
+               'RC': 'reference comment'
+           }, {
+               'RM': 'reference medline',
+               ...
+           }]
+       }
+
 GS metadata
 +++++++++++
 Data relating to a specific sequence in the multiple sequence alignment.
@@ -166,8 +203,10 @@ Where ``O83071/259-312`` is the sequence name, ``AC`` is the feature name, and
 GS metadata is stored in the sequence-specific ``metadata`` dictionary.
 
 .. note:: When reading, duplicate GS feature names will have their values
-   concatenated in the order they appear in the file. When writing, each GS
-   feature will be placed on its own line, regardless of length.
+   concatenated in the order they appear in the file. Concatenation will
+   also add a space between lines if one isn't already there in order to avoid
+   joining words together. When writing, each GS feature will be placed on its
+   own line, regardless of length.
 
 GR metadata
 +++++++++++
@@ -240,7 +279,7 @@ Suppose we have a Stockholm file containing an MSA of protein sequences
 >>> fs = '\\n'.join([
 ...         '# STOCKHOLM 1.0',
 ...         '#=GF CC CBS domains are small intracellular modules mostly'
-...         ' found ',
+...         ' found',
 ...         '#=GF CC in 2 or four copies within a protein.',
 ...         '#=GS O83071/192-246 AC O83071',
 ...         '#=GS O31698/88-139 OS Bacillus subtilis',
@@ -363,11 +402,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from future.utils import viewitems
-from future.builtins import zip
-
 from collections import OrderedDict
 
 from skbio.alignment import TabularMSA
@@ -375,6 +409,7 @@ from skbio.sequence._grammared_sequence import GrammaredSequence
 from skbio.io import create_format, StockholmFormatError
 
 stockholm = create_format('stockholm')
+_REFERENCE_TAGS = frozenset({'RM', 'RT', 'RA', 'RL', 'RC'})
 
 
 @stockholm.sniffer()
@@ -397,7 +432,10 @@ def _stockholm_sniffer(fh):
 def _stockholm_to_tabular_msa(fh, constructor=None):
     # Checks that user has passed required constructor parameter
     if constructor is None:
-        raise ValueError("Must provide `constructor` parameter.")
+        raise ValueError("Must provide `constructor` parameter indicating the "
+                         "type of sequences in the alignment. `constructor` "
+                         "must be a subclass of `GrammaredSequence` "
+                         "(e.g., `DNA`, `RNA`, `Protein`).")
     # Checks that contructor parameter is supported
     elif not issubclass(constructor, GrammaredSequence):
         raise TypeError("`constructor` must be a subclass of "
@@ -447,7 +485,7 @@ def _stockholm_to_tabular_msa(fh, constructor=None):
 
 
 # For storing intermediate data used to construct a Sequence object.
-class _MSAData(object):
+class _MSAData:
     def __init__(self):
         self._seqs = {}
         self._seq_order = []
@@ -474,12 +512,32 @@ class _MSAData(object):
         # Handles extra line(s) of an already created tree
         elif feature_name == 'NH' and feature_name in self._metadata:
             trees = self._metadata[feature_name]
-            tree_id = list(trees.keys())[-1]
-            self._metadata[feature_name][tree_id] = (trees[tree_id] +
-                                                     feature_data)
+            if isinstance(trees, OrderedDict):
+                tree_id = next(reversed(trees))
+                self._metadata[feature_name][tree_id] = (trees[tree_id] +
+                                                         feature_data)
+            else:
+                self._metadata[feature_name] = (self._metadata[feature_name] +
+                                                feature_data)
+        elif feature_name == 'RN':
+            if feature_name not in self._metadata:
+                self._metadata[feature_name] = [OrderedDict()]
+            else:
+                self._metadata[feature_name].append(OrderedDict())
+        elif feature_name in _REFERENCE_TAGS:
+            if 'RN' not in self._metadata:
+                raise StockholmFormatError("Expected 'RN' tag to precede "
+                                           "'%s' tag." % feature_name)
+            reference_dict = self._metadata['RN'][-1]
+            if feature_name not in reference_dict:
+                reference_dict[feature_name] = feature_data
+            else:
+                padding = _get_padding(reference_dict[feature_name])
+                reference_dict[feature_name] += padding + feature_data
         elif feature_name in self._metadata:
+            padding = _get_padding(self._metadata[feature_name][-1])
             self._metadata[feature_name] = (self._metadata[feature_name] +
-                                            feature_data)
+                                            padding + feature_data)
         else:
             self._metadata[feature_name] = feature_data
 
@@ -525,7 +583,7 @@ class _MSAData(object):
                           index=self._seq_order)
 
 
-class _SeqData(object):
+class _SeqData:
     def __init__(self, name):
         self.name = name
         self._seq = None
@@ -548,7 +606,8 @@ class _SeqData(object):
         if self.metadata is None:
             self.metadata = OrderedDict()
         if feature_name in self.metadata:
-            self.metadata[feature_name] += feature_data
+            padding = _get_padding(self.metadata[feature_name][-1])
+            self.metadata[feature_name] += padding + feature_data
         else:
             self.metadata[feature_name] = feature_data
 
@@ -637,11 +696,36 @@ def _tabular_msa_to_stockholm(obj, fh):
 
     # Writes GF data to file
     if obj.has_metadata():
-        for gf_feature, gf_feature_data in viewitems(obj.metadata):
+        for gf_feature, gf_feature_data in obj.metadata.items():
             if gf_feature == 'NH' and isinstance(gf_feature_data, dict):
-                for tree_id, tree in viewitems(obj.metadata[gf_feature]):
+                for tree_id, tree in gf_feature_data.items():
                     fh.write("#=GF TN %s\n" % tree_id)
                     fh.write("#=GF NH %s\n" % tree)
+            elif gf_feature == 'RN':
+                if not isinstance(gf_feature_data, list):
+                    raise StockholmFormatError(
+                        "Expected 'RN' to contain a list of reference "
+                        "dictionaries, got %r." % gf_feature_data)
+
+                for ref_num, dictionary in enumerate(gf_feature_data, start=1):
+                    if not isinstance(dictionary, dict):
+                        raise StockholmFormatError(
+                            "Expected reference information to be stored as a "
+                            "dictionary, found reference %d stored as %r." %
+                            (ref_num, type(dictionary).__name__))
+
+                    fh.write("#=GF RN [%d]\n" % ref_num)
+                    for feature in dictionary:
+                        if feature not in _REFERENCE_TAGS:
+                            formatted_reference_tags = ', '.join(
+                                [tag for tag in _REFERENCE_TAGS])
+                            raise StockholmFormatError(
+                                "Invalid reference tag %r found in reference "
+                                "dictionary %d. Valid reference tags are: %s."
+                                % (feature, ref_num, formatted_reference_tags))
+
+                        fh.write("#=GF %s %s\n" % (feature,
+                                                   dictionary[feature]))
             else:
                 fh.write("#=GF %s %s\n" % (gf_feature, gf_feature_data))
 
@@ -649,10 +733,12 @@ def _tabular_msa_to_stockholm(obj, fh):
     # Writes GS data to file, retrieves GR data, and retrieves sequence data
     for seq, seq_name in zip(obj, obj.index):
         seq_name = str(seq_name)
+
         if seq.has_metadata():
-            for gs_feature, gs_feature_data in viewitems(seq.metadata):
+            for gs_feature, gs_feature_data in seq.metadata.items():
                 fh.write("#=GS %s %s %s\n" % (seq_name, gs_feature,
                                               gs_feature_data))
+
         unpadded_data.append((seq_name, str(seq)))
         if seq.has_positional_metadata():
             df = _format_positional_metadata(seq.positional_metadata,
@@ -708,3 +794,7 @@ def _format_positional_metadata(df, data_type):
                                        " in column %s of incorrect length."
                                        % (data_type, column))
     return str_df
+
+
+def _get_padding(item):
+    return '' if item[-1].isspace() else ' '
diff --git a/skbio/io/format/tests/__init__.py b/skbio/io/format/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/io/format/tests/__init__.py
+++ b/skbio/io/format/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/io/format/tests/data/fastq_single_seq_illumina1.8 b/skbio/io/format/tests/data/fastq_single_seq_illumina1.8
new file mode 100644
index 0000000..e279264
--- /dev/null
+++ b/skbio/io/format/tests/data/fastq_single_seq_illumina1.8
@@ -0,0 +1,4 @@
+ at EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
+CCGCCGGTCATCCAATTCATTGCGAAAGGT
++
+n%OiP]XgBh]u:9jO|KN4Op~7Ry;-&.
diff --git a/skbio/io/format/tests/data/stockholm_different_padding b/skbio/io/format/tests/data/stockholm_different_padding
new file mode 100644
index 0000000..53eb556
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_different_padding
@@ -0,0 +1,8 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RC A Runon
+#=GF RC Comment Without Whitespace
+#=GF RN [2]
+#=GF RC A Runon 
+#=GF RC Comment With Whitespace
+//
\ No newline at end of file
diff --git a/skbio/io/format/tests/data/stockholm_missing_reference_items b/skbio/io/format/tests/data/stockholm_missing_reference_items
new file mode 100644
index 0000000..f42c004
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_missing_reference_items
@@ -0,0 +1,5 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RT A Title
+#=GF RA The Author
+//
diff --git a/skbio/io/format/tests/data/stockholm_missing_rn_tag b/skbio/io/format/tests/data/stockholm_missing_rn_tag
new file mode 100644
index 0000000..3fd7e02
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_missing_rn_tag
@@ -0,0 +1,3 @@
+# STOCKHOLM 1.0
+#=GF RL Flagstaff Arizona
+//
diff --git a/skbio/io/format/tests/data/stockholm_multi_line_tree_no_id b/skbio/io/format/tests/data/stockholm_multi_line_tree_no_id
new file mode 100644
index 0000000..8b65cb2
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_multi_line_tree_no_id
@@ -0,0 +1,4 @@
+# STOCKHOLM 1.0
+#=GF NH ABCD
+#=GF NH EFGH
+//
diff --git a/skbio/io/format/tests/data/stockholm_multi_line_tree_with_id b/skbio/io/format/tests/data/stockholm_multi_line_tree_with_id
new file mode 100644
index 0000000..337c747
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_multi_line_tree_with_id
@@ -0,0 +1,5 @@
+# STOCKHOLM 1.0
+#=GF TN tree1
+#=GF NH ABCD
+#=GF NH EFGH
+//
diff --git a/skbio/io/format/tests/data/stockholm_multiple_multi_line_trees b/skbio/io/format/tests/data/stockholm_multiple_multi_line_trees
new file mode 100644
index 0000000..89424e4
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_multiple_multi_line_trees
@@ -0,0 +1,8 @@
+# STOCKHOLM 1.0
+#=GF TN tree1
+#=GF NH ABCD
+#=GF NH EFGH
+#=GF TN tree2
+#=GF NH IJKL
+#=GF NH MNOP
+//
diff --git a/skbio/io/format/tests/data/stockholm_multiple_references b/skbio/io/format/tests/data/stockholm_multiple_references
new file mode 100644
index 0000000..2ab5be1
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_multiple_references
@@ -0,0 +1,20 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RM 123456789
+#=GF RT Title 1
+#=GF RA Author 1
+#=GF RL Location 1
+#=GF RC Comment 1
+#=GF RN [2]
+#=GF RM 987654321
+#=GF RT Title 2
+#=GF RA Author 2
+#=GF RL Location 2
+#=GF RC Comment 2
+#=GF RN [3]
+#=GF RM 132465879
+#=GF RT Title 3
+#=GF RA Author 3
+#=GF RL Location 3
+#=GF RC Comment 3
+//
diff --git a/skbio/io/format/tests/data/stockholm_runon_gf b/skbio/io/format/tests/data/stockholm_runon_gf_no_whitespace
similarity index 98%
copy from skbio/io/format/tests/data/stockholm_runon_gf
copy to skbio/io/format/tests/data/stockholm_runon_gf_no_whitespace
index a65830f..d58aba4 100644
--- a/skbio/io/format/tests/data/stockholm_runon_gf
+++ b/skbio/io/format/tests/data/stockholm_runon_gf_no_whitespace
@@ -1,5 +1,5 @@
 # STOCKHOLM 1.0
-#=GF CC CBS domains are small intracellular modules mostly found 
+#=GF CC CBS domains are small intracellular modules mostly found
 #=GF CC in 2 or four copies within a protein.
 GG1344       ACTGGTTCAATG
 //
diff --git a/skbio/io/format/tests/data/stockholm_runon_gf b/skbio/io/format/tests/data/stockholm_runon_gf_with_whitespace
similarity index 100%
rename from skbio/io/format/tests/data/stockholm_runon_gf
rename to skbio/io/format/tests/data/stockholm_runon_gf_with_whitespace
diff --git a/skbio/io/format/tests/data/stockholm_runon_gs b/skbio/io/format/tests/data/stockholm_runon_gs
deleted file mode 100644
index 755eb5f..0000000
--- a/skbio/io/format/tests/data/stockholm_runon_gs
+++ /dev/null
@@ -1,5 +0,0 @@
-# STOCKHOLM 1.0
-#=GS seq1 AL ABCDEFGHIJKLMNOP
-#=GS seq1 AL QRSTUVWXYZ
-seq1 ATCGTTCAGTG
-//
diff --git a/skbio/io/format/tests/data/stockholm_runon_gs_no_whitespace b/skbio/io/format/tests/data/stockholm_runon_gs_no_whitespace
new file mode 100644
index 0000000..b0f0919
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_runon_gs_no_whitespace
@@ -0,0 +1,5 @@
+# STOCKHOLM 1.0
+#=GS seq1 LN This is a runon
+#=GS seq1 LN GS line.
+seq1 ATCGTTCAGTG
+//
diff --git a/skbio/io/format/tests/data/stockholm_runon_gs_with_whitespace b/skbio/io/format/tests/data/stockholm_runon_gs_with_whitespace
new file mode 100644
index 0000000..01b75f4
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_runon_gs_with_whitespace
@@ -0,0 +1,5 @@
+# STOCKHOLM 1.0
+#=GS seq1 LN This is a runon 
+#=GS seq1 LN GS line.
+seq1 ATCGTTCAGTG
+//
diff --git a/skbio/io/format/tests/data/stockholm_runon_references b/skbio/io/format/tests/data/stockholm_runon_references
new file mode 100644
index 0000000..f6216e6
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_runon_references
@@ -0,0 +1,10 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RM 123456789
+#=GF RT A Runon
+#=GF RT Title
+#=GF RA The Author
+#=GF RL A Location
+#=GF RC A Runon 
+#=GF RC Comment
+//
diff --git a/skbio/io/format/tests/data/stockholm_runon_references_mixed b/skbio/io/format/tests/data/stockholm_runon_references_mixed
new file mode 100644
index 0000000..e70fa33
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_runon_references_mixed
@@ -0,0 +1,10 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RC A Runon 
+#=GF RM 123456789
+#=GF RT A Runon
+#=GF RA The Author
+#=GF RT Title
+#=GF RL A Location
+#=GF RC Comment
+//
diff --git a/skbio/io/format/tests/data/stockholm_single_reference b/skbio/io/format/tests/data/stockholm_single_reference
new file mode 100644
index 0000000..d698ce9
--- /dev/null
+++ b/skbio/io/format/tests/data/stockholm_single_reference
@@ -0,0 +1,8 @@
+# STOCKHOLM 1.0
+#=GF RN [1]
+#=GF RM 123456789
+#=GF RT A Title
+#=GF RA The Author
+#=GF RL A Location
+#=GF RC Comment
+//
diff --git a/skbio/io/format/tests/test_base.py b/skbio/io/format/tests/test_base.py
index a19fb25..882f68a 100644
--- a/skbio/io/format/tests/test_base.py
+++ b/skbio/io/format/tests/test_base.py
@@ -6,11 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-import six
-from future.builtins import range, zip
-
 import unittest
 
 import numpy.testing as npt
@@ -299,32 +294,29 @@ class TestFormatFASTALikeRecords(unittest.TestCase):
             npt.assert_equal(o, e)
 
     def test_newline_character_in_id_whitespace_replacement(self):
-        with six.assertRaisesRegex(self, ValueError, 'Newline character'):
+        with self.assertRaisesRegex(ValueError, 'Newline character'):
             list(_format_fasta_like_records(self.gen, '-\n--', ' ', False))
 
     def test_newline_character_in_description_newline_replacement(self):
-        with six.assertRaisesRegex(self, ValueError, 'Newline character'):
+        with self.assertRaisesRegex(ValueError, 'Newline character'):
             list(_format_fasta_like_records(self.gen, None, 'a\nb', False))
 
     def test_empty_sequence(self):
         def blank_seq_gen():
-            for seq in (DNA('A'), Sequence(''),
-                        RNA('GG')):
-                yield seq
+            yield from (DNA('A'), Sequence(''), RNA('GG'))
 
-        with six.assertRaisesRegex(self, ValueError, '2nd.*empty'):
+        with self.assertRaisesRegex(ValueError, '2nd.*empty'):
             list(_format_fasta_like_records(blank_seq_gen(), None, None,
                                             False))
 
     def test_missing_quality_scores(self):
         def missing_qual_gen():
-            for seq in (RNA('A', positional_metadata={'quality': [42]}),
+            yield from (RNA('A', positional_metadata={'quality': [42]}),
                         Sequence('AG'),
-                        DNA('GG', positional_metadata={'quality': [41, 40]})):
-                yield seq
+                        DNA('GG', positional_metadata={'quality': [41, 40]}))
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   '2nd sequence.*quality scores'):
+        with self.assertRaisesRegex(ValueError,
+                                    '2nd sequence.*quality scores'):
             list(_format_fasta_like_records(missing_qual_gen(), '-', '-',
                                             True))
 
diff --git a/skbio/io/format/tests/test_blast6.py b/skbio/io/format/tests/test_blast6.py
index 4f0b78d..db81ddd 100644
--- a/skbio/io/format/tests/test_blast6.py
+++ b/skbio/io/format/tests/test_blast6.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from six import assertRaisesRegex
-
 import unittest
 
 import pandas as pd
@@ -89,20 +85,20 @@ class TestBlast6Reader(unittest.TestCase):
 
     def test_custom_and_default_passed_error(self):
         fp = get_data_path('blast6_default_single_line')
-        with assertRaisesRegex(self, ValueError,
-                               "`columns` and `default_columns`"):
+        with self.assertRaisesRegex(ValueError,
+                                    "`columns` and `default_columns`"):
             _blast6_to_data_frame(fp, columns=['qseqid'], default_columns=True)
 
     def test_no_columns_passed_error(self):
         fp = get_data_path('blast6_default_single_line')
-        with assertRaisesRegex(self, ValueError,
-                               "Either `columns` or `default_columns`"):
+        with self.assertRaisesRegex(ValueError,
+                                    "Either `columns` or `default_columns`"):
             _blast6_to_data_frame(fp)
 
     def test_wrong_amount_of_columns_error(self):
         fp = get_data_path('blast6_invalid_number_of_columns')
-        with assertRaisesRegex(self, ValueError,
-                               "Specified number of columns \(12\).*\(10\)"):
+        with self.assertRaisesRegex(
+                ValueError, "Specified number of columns \(12\).*\(10\)"):
             _blast6_to_data_frame(fp, default_columns=True)
 
     def test_different_data_in_same_column(self):
@@ -112,8 +108,8 @@ class TestBlast6Reader(unittest.TestCase):
 
     def test_wrong_column_name_error(self):
         fp = get_data_path('blast6_default_single_line')
-        with assertRaisesRegex(self, ValueError,
-                               "Unrecognized column.*'abcd'"):
+        with self.assertRaisesRegex(ValueError,
+                                    "Unrecognized column.*'abcd'"):
             _blast6_to_data_frame(fp, columns=['qseqid', 'sseqid', 'pident',
                                                'length', 'mismatch', 'gapopen',
                                                'qstart', 'qend', 'sstart',
diff --git a/skbio/io/format/tests/test_blast7.py b/skbio/io/format/tests/test_blast7.py
index f323077..8ffe399 100644
--- a/skbio/io/format/tests/test_blast7.py
+++ b/skbio/io/format/tests/test_blast7.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-from six import assertRaisesRegex
-
 import unittest
 
 import pandas as pd
@@ -166,45 +162,47 @@ class TestBlast7Reader(unittest.TestCase):
 
     def test_differing_fields_error(self):
         fp = get_data_path("blast7_invalid_differing_fields")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
-                               " do.*\[.*'qseqid', .*'sseqid', .*'score'\]"):
+        with self.assertRaisesRegex(
+                BLAST7FormatError,
+                "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
+                " do.*\[.*'qseqid', .*'sseqid', .*'score'\]"):
             _blast7_to_data_frame(fp)
         fp = get_data_path("legacy9_invalid_differing_fields")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
-                               " do.*\[.*'qseqid', .*'sseqid', "
-                               ".*'sallseqid'\]"):
+        with self.assertRaisesRegex(
+                BLAST7FormatError,
+                "Fields \[.*'qseqid', .*'sseqid', .*'qstart'\]"
+                " do.*\[.*'qseqid', .*'sseqid', "
+                ".*'sallseqid'\]"):
             _blast7_to_data_frame(fp)
 
     def test_no_data_error(self):
         fp = get_data_path("blast7_invalid_gibberish")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "File contains no"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "File contains no"):
             _blast7_to_data_frame(fp)
         fp = get_data_path("blast7_invalid_no_data")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "File contains no"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "File contains no"):
             _blast7_to_data_frame(fp)
         fp = get_data_path("empty")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "File contains no"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "File contains no"):
             _blast7_to_data_frame(fp)
 
     def test_wrong_amount_of_columns_error(self):
         fp = get_data_path("blast7_invalid_too_many_columns")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "Number of fields.*\(2\)"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "Number of fields.*\(2\)"):
             _blast7_to_data_frame(fp)
         fp = get_data_path("legacy9_invalid_too_many_columns")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "Number of fields.*\(12\)"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "Number of fields.*\(12\)"):
             _blast7_to_data_frame(fp)
 
     def test_unrecognized_field_error(self):
         fp = get_data_path("blast7_invalid_unrecognized_field")
-        with assertRaisesRegex(self, BLAST7FormatError,
-                               "Unrecognized field \(.*'sallid'\)"):
+        with self.assertRaisesRegex(BLAST7FormatError,
+                                    "Unrecognized field \(.*'sallid'\)"):
             _blast7_to_data_frame(fp)
 
 
diff --git a/skbio/io/format/tests/test_clustal.py b/skbio/io/format/tests/test_clustal.py
index 733d65b..a0eece3 100644
--- a/skbio/io/format/tests/test_clustal.py
+++ b/skbio/io/format/tests/test_clustal.py
@@ -6,14 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import string
 from io import StringIO
 from unittest import TestCase, main
 
-import six
-
 from skbio import TabularMSA
 from skbio.sequence._grammared_sequence import GrammaredSequence
 from skbio.util._decorator import classproperty, overrides
@@ -38,7 +34,7 @@ class CustomSequence(GrammaredSequence):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set(string.ascii_letters)
 
     @classproperty
@@ -49,11 +45,11 @@ class CustomSequence(GrammaredSequence):
 
 class ClustalHelperTests(TestCase):
     def test_label_line_parser(self):
-        self.assertEqual(_label_line_parser(StringIO(u'abc\tucag')),
+        self.assertEqual(_label_line_parser(StringIO('abc\tucag')),
                          ({"abc": ["ucag"]}, ['abc']))
 
         with self.assertRaises(ClustalFormatError):
-            _label_line_parser(StringIO(u'abctucag'))
+            _label_line_parser(StringIO('abctucag'))
 
     def test_is_clustal_seq_line(self):
         ic = _is_clustal_seq_line
@@ -106,12 +102,12 @@ class ClustalIOTests(TestCase):
 
     def setUp(self):
         self.valid_clustal_out = [
-            StringIO(u'CLUSTAL\n\nabc\tucag'),
-            StringIO(u'CLUSTAL\n\nabc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nab'
+            StringIO('CLUSTAL\n\nabc\tucag'),
+            StringIO('CLUSTAL\n\nabc\tuuu\ndef\tccc\n\n    ***\n\ndef ggg\nab'
                      'c\taaa\n'),
-            StringIO(u'\n'.join(['CLUSTAL\n', 'abc uca', 'def ggg ccc'])),
-            StringIO(u'\n'.join(['CLUSTAL\n', 'abc uca ggg', 'def ggg ccc'])),
-            StringIO(u"""CLUSTAL
+            StringIO('\n'.join(['CLUSTAL\n', 'abc uca', 'def ggg ccc'])),
+            StringIO('\n'.join(['CLUSTAL\n', 'abc uca ggg', 'def ggg ccc'])),
+            StringIO("""CLUSTAL
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -120,7 +116,7 @@ xyz             ------------------------------------------------------------
 
 
 """),
-            StringIO(u"""CLUSTAL
+            StringIO("""CLUSTAL
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -132,7 +128,7 @@ abc             GUCGAUACGUACGUCAGUCAGUACGUCAGCAUGCAUACGUACGUCGUACGUACGU-CGAC
 def             -----------------------------------------CGCGAUGCAUGCAU-CGAU
 xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
 """),
-            StringIO(u"""CLUSTAL W (1.82) multiple sequence alignment
+            StringIO("""CLUSTAL W (1.82) multiple sequence alignment
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -148,7 +144,7 @@ xyz             -------------------------------------CAUGCAUCGUACGUACGCAUGAC
 abc             UGACUAGUCAGCUAGCAUCGAUCAGU
 def             CGAUCAGUCAGUCGAU----------
 xyz             UGCUGCAUCA----------------"""),
-            StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+            StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 abc             GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA 60
@@ -166,27 +162,27 @@ def             CGAUCAGUCAGUCGAU---------- 34
 xyz             UGCUGCAUCA---------------- 33
                 *     ***""")
             ]
-        self.invalid_clustal_out = [StringIO(u'\n'.join(['dshfjsdfhdfsj',
-                                                         'hfsdjksdfhjsdf'])),
-                                    StringIO(u'\n'.join(['hfsdjksdfhjsdf'])),
-                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
-                                                         'dshfjsdfhdfsj',
-                                                         'hfsdjksdfhjsdf'])),
-                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
-                                                         '\t',
-                                                         'hfsdjksdfhjsdf'])),
-                                    StringIO(u'\n'.join(['dshfj\tdfhdfsj',
-                                                         'hfsdjksdfhjsdf'])),
-                                    StringIO(u'\n'.join(['dshfjsdfhdfsj',
-                                                         'hfsdjk\tdfhjsdf'])),
-                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+        self.invalid_clustal_out = [StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'dshfjsdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        '\t',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfj\tdfhdfsj',
+                                                        'hfsdjksdfhjsdf'])),
+                                    StringIO('\n'.join(['dshfjsdfhdfsj',
+                                                        'hfsdjk\tdfhjsdf'])),
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 ------------------------------------------------------------
 adk -----GGGGGGG------------------------------------------------
 """),
-                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -196,7 +192,7 @@ adk -----GGGGGGG------------------------------------------------
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 adk -----GGGGGGG---------------------------------------------
 """),
-                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -207,7 +203,7 @@ adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCA
 adk -----GGGGGGG---------------------------------------------
 """),
 
-                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -215,7 +211,7 @@ adj GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
 adk -----GGGGGGG------------------------------------------------
 """),
 
-                                    StringIO(u"""CLUSTAL W (1.74) multiple sequence alignment
+                                    StringIO("""CLUSTAL W (1.74) multiple sequence alignment
 
 
 GCAUGCAUGCAUGAUCGUACGUCAGCAUGCUAGACUGCAUACGUACGUACGCAUGCAUCA
@@ -239,7 +235,7 @@ UGCUGCAUCA---------------- 33
         self.assertEqual(dict(result), {})
 
     def test_tabular_msa_to_clustal_with_bad_input(self):
-        BAD = StringIO(u'\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
+        BAD = StringIO('\n'.join(['dshfjsdfhdfsj', 'hfsdjksdfhjsdf']))
 
         with self.assertRaises(ClustalFormatError):
             dict(_clustal_to_tabular_msa(BAD, constructor=CustomSequence))
@@ -273,25 +269,25 @@ UGCUGCAUCA---------------- 33
         self.assertEqual(_clustal_sniffer(StringIO()), (False, {}))
 
     def test_no_constructor(self):
-        with six.assertRaisesRegex(self, ValueError, "`constructor`"):
+        with self.assertRaisesRegex(ValueError, "`constructor`"):
             _clustal_to_tabular_msa(self.valid_clustal_out[0])
 
     def test_duplicate_labels(self):
         msa = TabularMSA([CustomSequence('foo'),
                           CustomSequence('bar')], index=['a', 'a'])
 
-        with six.assertRaisesRegex(self, ClustalFormatError, "index.*unique"):
+        with self.assertRaisesRegex(ClustalFormatError, "index.*unique"):
             with StringIO() as fh:
                 _tabular_msa_to_clustal(msa, fh)
 
     def test_invalid_lengths(self):
         fh = StringIO(
-            u"CLUSTAL\n"
+            "CLUSTAL\n"
             "\n\n"
             "abc             GCAU\n"
             "def             -----\n")
 
-        with six.assertRaisesRegex(self, ClustalFormatError, "not aligned"):
+        with self.assertRaisesRegex(ClustalFormatError, "not aligned"):
             _clustal_to_tabular_msa(fh, constructor=CustomSequence)
 
 
diff --git a/skbio/io/format/tests/test_emptyfile.py b/skbio/io/format/tests/test_emptyfile.py
index af9309f..f0d754b 100644
--- a/skbio/io/format/tests/test_emptyfile.py
+++ b/skbio/io/format/tests/test_emptyfile.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import unittest
 import io
 
@@ -20,16 +18,16 @@ class TestEmptyFile(unittest.TestCase):
         self.assertTrue(res)
         self.assertEqual({}, kw)
 
-        res, kw = _empty_file_sniffer(io.StringIO(u"       \n   \t "))
+        res, kw = _empty_file_sniffer(io.StringIO("       \n   \t "))
         self.assertTrue(res)
         self.assertEqual({}, kw)
 
     def test_not_empty(self):
-        res, kw = _empty_file_sniffer(io.StringIO(u"a"))
+        res, kw = _empty_file_sniffer(io.StringIO("a"))
         self.assertFalse(res)
         self.assertEqual({}, kw)
 
-        res, kw = _empty_file_sniffer(io.StringIO(u"                  \n \ta"))
+        res, kw = _empty_file_sniffer(io.StringIO("                  \n \ta"))
         self.assertFalse(res)
         self.assertEqual({}, kw)
 
diff --git a/skbio/io/format/tests/test_fasta.py b/skbio/io/format/tests/test_fasta.py
index 7052942..38b1a0b 100644
--- a/skbio/io/format/tests/test_fasta.py
+++ b/skbio/io/format/tests/test_fasta.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import map, range, zip
-import six
-
 import io
 import string
 from unittest import TestCase, main
@@ -43,7 +39,7 @@ class CustomSequence(GrammaredSequence):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set(string.ascii_letters)
 
     @classproperty
@@ -493,7 +489,7 @@ class ReaderTests(TestCase):
 
     def test_fasta_to_generator_invalid_files(self):
         for fp, kwargs, error_type, error_msg_regex in self.invalid_fps:
-            with six.assertRaisesRegex(self, error_type, error_msg_regex):
+            with self.assertRaisesRegex(error_type, error_msg_regex):
                 list(_fasta_to_generator(fp, **kwargs))
 
     # light testing of fasta -> object readers to ensure interface is present
@@ -520,9 +516,9 @@ class ReaderTests(TestCase):
 
             # empty file
             empty_fp = get_data_path('empty')
-            with six.assertRaisesRegex(self, ValueError, '1st sequence'):
+            with self.assertRaisesRegex(ValueError, '1st sequence'):
                 reader_fn(empty_fp)
-            with six.assertRaisesRegex(self, ValueError, '1st sequence'):
+            with self.assertRaisesRegex(ValueError, '1st sequence'):
                 reader_fn(empty_fp, qual=empty_fp)
 
             # the sequences in the following files don't necessarily make sense
@@ -612,19 +608,17 @@ class ReaderTests(TestCase):
                     self.assertEqual(obs, exp)
 
                 # seq_num too large
-                with six.assertRaisesRegex(self, ValueError, '8th sequence'):
+                with self.assertRaisesRegex(ValueError, '8th sequence'):
                     reader_fn(fasta_fp, seq_num=8)
                 for qual_fp in qual_fps:
-                    with six.assertRaisesRegex(self, ValueError,
-                                               '8th sequence'):
+                    with self.assertRaisesRegex(ValueError, '8th sequence'):
                         reader_fn(fasta_fp, seq_num=8, qual=qual_fp)
 
                 # seq_num too small
-                with six.assertRaisesRegex(self, ValueError, '`seq_num`=0'):
+                with self.assertRaisesRegex(ValueError, '`seq_num`=0'):
                     reader_fn(fasta_fp, seq_num=0)
                 for qual_fp in qual_fps:
-                    with six.assertRaisesRegex(self, ValueError,
-                                               '`seq_num`=0'):
+                    with self.assertRaisesRegex(ValueError, '`seq_num`=0'):
                         reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
 
     def test_fasta_to_tabular_msa(self):
@@ -656,7 +650,7 @@ class ReaderTests(TestCase):
                     self.assertEqual(obs, exp)
 
     def test_fasta_to_tabular_msa_no_constructor(self):
-        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+        with self.assertRaisesRegex(ValueError, '`constructor`'):
             _fasta_to_tabular_msa(get_data_path('fasta_single_seq'))
 
 
@@ -714,8 +708,7 @@ class WriterTests(TestCase):
         self.msa = TabularMSA(seqs)
 
         def empty_gen():
-            return
-            yield
+            yield from ()
 
         def single_seq_gen():
             yield self.bio_seq1
@@ -741,19 +734,17 @@ class WriterTests(TestCase):
         # including exercising the different splitting algorithms used for
         # sequence data vs. quality scores
         def multi_seq_gen():
-            for seq in (self.bio_seq1, self.bio_seq2, self.bio_seq3,
-                        self.dna_seq, self.rna_seq, self.prot_seq):
-                yield seq
+            yield from (self.bio_seq1, self.bio_seq2, self.bio_seq3,
+                        self.dna_seq, self.rna_seq, self.prot_seq)
 
         # can be serialized if no qual file is provided, else it should raise
         # an error because one seq has qual scores and the other doesn't
         def mixed_qual_score_gen():
-            missing_qual_seq = DNA(
-                'AAAAT', metadata={'id': 'da,dadadada',
-                                   'description': '10 hours'},
-                lowercase='introns')
-            for seq in self.bio_seq1, missing_qual_seq:
-                yield seq
+            yield self.bio_seq1
+            yield DNA('AAAAT',
+                      metadata={'id': 'da,dadadada',
+                                'description': '10 hours'},
+                      lowercase='introns')
 
         self.mixed_qual_score_gen = mixed_qual_score_gen()
 
@@ -812,8 +803,7 @@ class WriterTests(TestCase):
         ]))
 
         def blank_seq_gen():
-            for seq in self.bio_seq1, Sequence(''):
-                yield seq
+            yield from (self.bio_seq1, Sequence(''))
 
         # generators or parameter combos that cannot be written in fasta
         # format, paired with kwargs (if any), error type, and expected error
@@ -881,7 +871,7 @@ class WriterTests(TestCase):
     def test_generator_to_fasta_invalid_input(self):
         for obj, kwargs, error_type, error_msg_regexp in self.invalid_objs:
             fh = io.StringIO()
-            with six.assertRaisesRegex(self, error_type, error_msg_regexp):
+            with self.assertRaisesRegex(error_type, error_msg_regexp):
                 _generator_to_fasta(obj, fh, **kwargs)
             fh.close()
 
diff --git a/skbio/io/format/tests/test_fastq.py b/skbio/io/format/tests/test_fastq.py
index 337429e..c6b5644 100644
--- a/skbio/io/format/tests/test_fastq.py
+++ b/skbio/io/format/tests/test_fastq.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip
-import six
-
 import io
 import string
 import unittest
@@ -138,6 +134,11 @@ class TestSniffer(unittest.TestCase):
         for fp in self.negatives:
             self.assertEqual(_fastq_sniffer(fp), (False, {}))
 
+    def test_illumina_sniffed(self):
+        fp = get_data_path('fastq_single_seq_illumina1.8')
+        self.assertEqual(_fastq_sniffer(fp), (True, {'variant':
+                                                     'illumina1.8'}))
+
 
 class TestReaders(unittest.TestCase):
     def setUp(self):
@@ -322,11 +323,11 @@ class TestReaders(unittest.TestCase):
         # phred offsets
         for fp, error_type, error_msg_regex in self.invalid_files:
             for variant in 'sanger', 'illumina1.3', 'illumina1.8':
-                with six.assertRaisesRegex(self, error_type, error_msg_regex):
+                with self.assertRaisesRegex(error_type, error_msg_regex):
                     list(_fastq_to_generator(fp, variant=variant))
 
             for offset in 33, 64, 40, 77:
-                with six.assertRaisesRegex(self, error_type, error_msg_regex):
+                with self.assertRaisesRegex(error_type, error_msg_regex):
                     list(_fastq_to_generator(fp, phred_offset=offset))
 
     def test_fastq_to_generator_invalid_files_illumina(self):
@@ -336,17 +337,15 @@ class TestReaders(unittest.TestCase):
                'solexa_full_range_original_solexa.fastq']]
 
         for fp in fps:
-            with six.assertRaisesRegex(self, ValueError,
-                                       'out of range \[0, 62\]'):
+            with self.assertRaisesRegex(ValueError, 'out of range \[0, 62\]'):
                 list(_fastq_to_generator(fp, variant='illumina1.3'))
-            with six.assertRaisesRegex(self, ValueError,
-                                       'out of range \[0, 62\]'):
+            with self.assertRaisesRegex(ValueError, 'out of range \[0, 62\]'):
                 list(_fastq_to_generator(fp, variant='illumina1.8'))
 
     def test_fastq_to_generator_solexa(self):
         # solexa support isn't implemented yet. should raise error even with
         # valid solexa file
-        with six.assertRaisesRegex(self, ValueError, 'Solexa'):
+        with self.assertRaisesRegex(ValueError, 'Solexa'):
             list(_fastq_to_generator(
                 get_data_path('solexa_full_range_original_solexa.fastq'),
                 variant='solexa'))
@@ -405,7 +404,7 @@ class TestReaders(unittest.TestCase):
 
             @classproperty
             @overrides(GrammaredSequence)
-            def nondegenerate_chars(cls):
+            def definite_chars(cls):
                 return set(string.ascii_letters)
 
             @classproperty
@@ -438,7 +437,7 @@ class TestReaders(unittest.TestCase):
                     self.assertEqual(observed, expected)
 
     def test_fastq_to_tabular_msa_no_constructor(self):
-        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+        with self.assertRaisesRegex(ValueError, '`constructor`'):
             _fastq_to_tabular_msa(get_data_path('fastq_multi_seq_sanger'))
 
 
@@ -546,7 +545,7 @@ class TestWriters(unittest.TestCase):
                            positional_metadata={'quality': range(4)})
             yield Sequence('ACG', metadata={'id': 'foo', 'description': 'bar'})
 
-        with six.assertRaisesRegex(self, ValueError, '2nd.*quality scores'):
+        with self.assertRaisesRegex(ValueError, '2nd.*quality scores'):
             _generator_to_fastq(gen(), io.StringIO(), variant='illumina1.8')
 
 
diff --git a/skbio/io/format/tests/test_genbank.py b/skbio/io/format/tests/test_genbank.py
index 75003f3..e3eeb14 100644
--- a/skbio/io/format/tests/test_genbank.py
+++ b/skbio/io/format/tests/test_genbank.py
@@ -6,14 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import map, zip
-
 import io
 import numpy as np
 import pandas as pd
 import numpy.testing as npt
-import six
 from unittest import TestCase, main
 
 from skbio import Protein, DNA, RNA, Sequence
@@ -95,7 +91,7 @@ class GenBankIOTests(TestCase):
             'gugaaacaaagcacuauugcacuggcugucuuaccguuacuguuuaccccugugacaaaagcc',
             {'ACCESSION': 'M14399',
              'COMMENT': 'Original source text: E.coli, cDNA to mRNA.',
-             'DEFINITION': u"alkaline phosphatase signal mRNA, 5' end.",
+             'DEFINITION': "alkaline phosphatase signal mRNA, 5' end.",
              'FEATURES': [{'db_xref': '"taxon:562"',
                            'index_': 0,
                            'left_partial_': False,
@@ -257,8 +253,8 @@ REFERENCE   1  (bases 1 to 154478)
             ['LOCUS       NP_001832                360 aa'
              '            linear   PRI 2001-12-18']]
         for line in lines:
-            with six.assertRaisesRegex(self, GenBankFormatError,
-                                       'Could not parse the LOCUS line:.*'):
+            with self.assertRaisesRegex(GenBankFormatError,
+                                        'Could not parse the LOCUS line:.*'):
                 _parse_locus(line)
 
     def test_parse_section_default(self):
@@ -329,9 +325,9 @@ REFERENCE   1  (bases 1 to 154478)
             'abc',
             '3-8']
         for example in examples:
-            with six.assertRaisesRegex(self, GenBankFormatError,
-                                       'Could not parse location string: '
-                                       '"%s"' % example):
+            with self.assertRaisesRegex(GenBankFormatError,
+                                        'Could not parse location string: '
+                                        '"%s"' % example):
                 _parse_loc_str(example, length)
 
     def test_genbank_to_generator_single(self):
diff --git a/skbio/io/format/tests/test_lsmat.py b/skbio/io/format/tests/test_lsmat.py
index 44fab52..b4499a3 100644
--- a/skbio/io/format/tests/test_lsmat.py
+++ b/skbio/io/format/tests/test_lsmat.py
@@ -6,12 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
+import io
 from unittest import TestCase, main
 
-from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix
 from skbio.io import LSMatFormatError
 from skbio.io.format.lsmat import (
@@ -22,12 +19,12 @@ from skbio.stats.distance import DissimilarityMatrix, DistanceMatrixError
 
 class LSMatTestData(TestCase):
     def setUp(self):
-        self.lsmat_1x1_fh = StringIO(LSMat_1x1)
-        self.lsmat_2x2_fh = StringIO(LSMat_2x2)
-        self.lsmat_2x2_asym_fh = StringIO(LSMat_2x2_ASYM)
-        self.lsmat_3x3_fh = StringIO(LSMat_3x3)
-        self.lsmat_3x3_whitespace_fh = StringIO(LSMat_3x3_WHITESPACE)
-        self.lsmat_3x3_csv_fh = StringIO(LSMat_3x3_CSV)
+        self.lsmat_1x1_fh = io.StringIO(LSMat_1x1)
+        self.lsmat_2x2_fh = io.StringIO(LSMat_2x2)
+        self.lsmat_2x2_asym_fh = io.StringIO(LSMat_2x2_ASYM)
+        self.lsmat_3x3_fh = io.StringIO(LSMat_3x3)
+        self.lsmat_3x3_whitespace_fh = io.StringIO(LSMat_3x3_WHITESPACE)
+        self.lsmat_3x3_csv_fh = io.StringIO(LSMat_3x3_CSV)
 
         self.valid_fhs = [
             self.lsmat_1x1_fh,
@@ -37,13 +34,13 @@ class LSMatTestData(TestCase):
             self.lsmat_3x3_whitespace_fh
         ]
 
-        self.empty_fh = StringIO()
-        self.invalid_1_fh = StringIO(INVALID_1)
-        self.invalid_2_fh = StringIO(INVALID_2)
-        self.invalid_3_fh = StringIO(INVALID_3)
-        self.invalid_4_fh = StringIO(INVALID_4)
-        self.invalid_5_fh = StringIO(INVALID_5)
-        self.invalid_6_fh = StringIO(INVALID_6)
+        self.empty_fh = io.StringIO()
+        self.invalid_1_fh = io.StringIO(INVALID_1)
+        self.invalid_2_fh = io.StringIO(INVALID_2)
+        self.invalid_3_fh = io.StringIO(INVALID_3)
+        self.invalid_4_fh = io.StringIO(INVALID_4)
+        self.invalid_5_fh = io.StringIO(INVALID_5)
+        self.invalid_6_fh = io.StringIO(INVALID_6)
 
         self.invalid_fhs = [
             (self.empty_fh, 'empty'),
@@ -121,8 +118,8 @@ class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
     def test_read_invalid_files(self):
         for fn in _lsmat_to_dissimilarity_matrix, _lsmat_to_distance_matrix:
             for invalid_fh, error_msg_regexp in self.invalid_fhs:
-                with six.assertRaisesRegex(self, LSMatFormatError,
-                                           error_msg_regexp):
+                with self.assertRaisesRegex(LSMatFormatError,
+                                            error_msg_regexp):
                     invalid_fh.seek(0)
                     fn(invalid_fh)
 
@@ -136,7 +133,7 @@ class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
                                (_distance_matrix_to_lsmat, self.dist_objs,
                                 self.dist_strs)):
             for obj, str_ in zip(objs, strs):
-                fh = StringIO()
+                fh = io.StringIO()
                 fn(obj, fh)
                 obs = fh.getvalue()
                 fh.close()
@@ -146,7 +143,7 @@ class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
         for fn, cls in ((_dissimilarity_matrix_to_lsmat, DissimilarityMatrix),
                         (_distance_matrix_to_lsmat, DistanceMatrix)):
             obj = cls(self.lsmat_3x3_data, ['a', 'b', 'c'])
-            fh = StringIO()
+            fh = io.StringIO()
             fn(obj, fh, delimiter=',')
             obs = fh.getvalue()
             fh.close()
@@ -165,7 +162,7 @@ class DissimilarityAndDistanceMatrixReaderWriterTests(LSMatTestData):
                 lsmat1 = reader_fn(fh)
 
                 # Write.
-                out_fh = StringIO()
+                out_fh = io.StringIO()
                 writer_fn(lsmat1, out_fh)
                 out_fh.seek(0)
 
diff --git a/skbio/io/format/tests/test_newick.py b/skbio/io/format/tests/test_newick.py
index 1e3bd14..0318d97 100644
--- a/skbio/io/format/tests/test_newick.py
+++ b/skbio/io/format/tests/test_newick.py
@@ -6,15 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import io
 import unittest
 
 from skbio import TreeNode
 from skbio.io import NewickFormatError
 from skbio.io.format.newick import (
     _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer)
-from skbio.io._fileobject import StringIO
 
 
 class TestNewick(unittest.TestCase):
@@ -302,7 +300,7 @@ class TestNewick(unittest.TestCase):
     def test_newick_to_tree_node_valid_files(self):
         for tree, newicks in self.trees_newick_lists:
             for newick in newicks:
-                fh = StringIO(newick)
+                fh = io.StringIO(newick)
                 read_tree = _newick_to_tree_node(fh)
 
                 self._assert_equal(tree, read_tree)
@@ -311,7 +309,7 @@ class TestNewick(unittest.TestCase):
 
     def test_newick_to_tree_node_invalid_files(self):
         for invalid, error_fragments in self.invalid_newicks:
-            fh = StringIO(invalid)
+            fh = io.StringIO(invalid)
             with self.assertRaises(NewickFormatError) as cm:
                 _newick_to_tree_node(fh)
             for frag in error_fragments:
@@ -321,7 +319,7 @@ class TestNewick(unittest.TestCase):
     def test_tree_node_to_newick(self):
         for tree, newicks in self.trees_newick_lists:
             newick = newicks[0]
-            fh = StringIO()
+            fh = io.StringIO()
             _tree_node_to_newick(tree, fh)
 
             self.assertEqual(newick, fh.getvalue())
@@ -331,9 +329,9 @@ class TestNewick(unittest.TestCase):
     def test_roundtrip(self):
         for tree, newicks in self.trees_newick_lists:
             newick = newicks[0]
-            fh = StringIO(newick)
+            fh = io.StringIO(newick)
             tree = _newick_to_tree_node(fh)
-            fh2 = StringIO()
+            fh2 = io.StringIO()
             _tree_node_to_newick(tree, fh2)
             fh2.seek(0)
             tree2 = _newick_to_tree_node(fh2)
@@ -345,9 +343,9 @@ class TestNewick(unittest.TestCase):
             fh2.close()
 
     def test_newick_to_tree_node_convert_underscores(self):
-        fh = StringIO('(_:0.1, _a, _b)__;')
+        fh = io.StringIO('(_:0.1, _a, _b)__;')
         tree = _newick_to_tree_node(fh, convert_underscores=False)
-        fh2 = StringIO()
+        fh2 = io.StringIO()
         _tree_node_to_newick(tree, fh2)
         self.assertEqual(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
         fh2.close()
@@ -356,13 +354,13 @@ class TestNewick(unittest.TestCase):
     def test_newick_sniffer_valid_files(self):
         for _, newicks in self.trees_newick_lists:
             for newick in newicks:
-                fh = StringIO(newick)
+                fh = io.StringIO(newick)
                 self.assertEqual(_newick_sniffer(fh), (True, {}))
                 fh.close()
 
     def test_newick_sniffer_invalid_files(self):
         for invalid, _ in self.invalid_newicks:
-            fh = StringIO(invalid)
+            fh = io.StringIO(invalid)
             self.assertEqual(_newick_sniffer(fh), (False, {}))
             fh.close()
 
diff --git a/skbio/io/format/tests/test_ordination.py b/skbio/io/format/tests/test_ordination.py
index 1cf87ef..2c37a7d 100644
--- a/skbio/io/format/tests/test_ordination.py
+++ b/skbio/io/format/tests/test_ordination.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import io
 from unittest import TestCase, main
 
@@ -197,12 +194,12 @@ class OrdinationResultsReaderWriterTests(OrdinationTestData):
                 obs = _ordination_to_ordination_results(fp)
                 assert_ordination_results_equal(
                     obs, obj, ignore_method_names=True,
-                    ignore_axis_labels=True, ignore_biplot_scores_labels=True)
+                    ignore_axis_labels=True)
 
     def test_read_invalid_files(self):
         for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
-            with six.assertRaisesRegex(self, OrdinationFormatError,
-                                       error_msg_regexp):
+            with self.assertRaisesRegex(OrdinationFormatError,
+                                        error_msg_regexp):
                 _ordination_to_ordination_results(invalid_fp)
 
     def test_write(self):
diff --git a/skbio/io/format/tests/test_phylip.py b/skbio/io/format/tests/test_phylip.py
index 6106e4f..757a2ff 100644
--- a/skbio/io/format/tests/test_phylip.py
+++ b/skbio/io/format/tests/test_phylip.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import io
 import unittest
 
@@ -150,11 +147,11 @@ class TestReaders(unittest.TestCase):
 
     def test_phylip_to_tabular_msa_invalid_files(self):
         for fp, error_type, error_msg_regex in self.invalid_files:
-            with six.assertRaisesRegex(self, error_type, error_msg_regex):
+            with self.assertRaisesRegex(error_type, error_msg_regex):
                 _phylip_to_tabular_msa(fp, constructor=DNA)
 
     def test_phylip_to_tabular_msa_no_constructor(self):
-        with six.assertRaisesRegex(self, ValueError, '`constructor`'):
+        with self.assertRaisesRegex(ValueError, '`constructor`'):
             _phylip_to_tabular_msa(get_data_path('phylip_dna_3_seqs'))
 
     def test_phylip_to_tabular_msa_valid_files(self):
@@ -245,12 +242,11 @@ class TestWriters(unittest.TestCase):
     def test_write_invalid_alignment(self):
         for invalid_obj, error_msg_regexp in self.invalid_objs:
             fh = io.StringIO()
-            with six.assertRaisesRegex(self, PhylipFormatError,
-                                       error_msg_regexp):
+            with self.assertRaisesRegex(PhylipFormatError, error_msg_regexp):
                 _tabular_msa_to_phylip(invalid_obj, fh)
 
             # ensure nothing was written to the file before the error was
-            # thrown. TODO remove this check when #674 is resolved
+            # thrown
             obs = fh.getvalue()
             fh.close()
             self.assertEqual(obs, '')
diff --git a/skbio/io/format/tests/test_qseq.py b/skbio/io/format/tests/test_qseq.py
index bc4e1a4..5b6bd60 100644
--- a/skbio/io/format/tests/test_qseq.py
+++ b/skbio/io/format/tests/test_qseq.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-from future.builtins import zip
-
 import unittest
 
 from skbio import Sequence, DNA, RNA, Protein
diff --git a/skbio/io/format/tests/test_stockholm.py b/skbio/io/format/tests/test_stockholm.py
index d3a72d0..69cfb21 100644
--- a/skbio/io/format/tests/test_stockholm.py
+++ b/skbio/io/format/tests/test_stockholm.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-import six
-
 import pandas as pd
 
 import io
@@ -30,7 +26,8 @@ class TestStockholmSniffer(unittest.TestCase):
             'stockholm_extensive',
             'stockholm_minimal',
             'stockholm_rna',
-            'stockholm_runon_gf',
+            'stockholm_runon_gf_with_whitespace',
+            'stockholm_runon_gf_no_whitespace',
             'stockholm_duplicate_sequence_names',
             'stockholm_duplicate_gr',
             'stockholm_duplicate_gc',
@@ -53,14 +50,26 @@ class TestStockholmSniffer(unittest.TestCase):
             'stockholm_metadata_only',
             'stockholm_multiple_msa',
             'stockholm_multiple_trees',
-            'stockholm_runon_gs',
+            'stockholm_runon_gs_with_whitespace',
+            'stockholm_runon_gs_no_whitespace',
             'stockholm_single_tree_with_id',
             'stockholm_single_tree_without_id',
             'stockholm_whitespace_only_lines',
             'stockholm_all_data_types',
             'stockholm_two_of_each_metadata',
             'stockholm_data_only',
-            'stockholm_nonstring_labels'
+            'stockholm_nonstring_labels',
+            'stockholm_missing_reference_items',
+            'stockholm_multiple_references',
+            'stockholm_runon_references',
+            'stockholm_runon_references_mixed',
+            'stockholm_single_reference',
+            'stockholm_missing_reference_items',
+            'stockholm_missing_rn_tag',
+            'stockholm_different_padding',
+            'stockholm_multi_line_tree_no_id',
+            'stockholm_multi_line_tree_with_id',
+            'stockholm_multiple_multi_line_trees'
             ]]
 
         self.negatives = [get_data_path(e) for e in [
@@ -179,7 +188,7 @@ class TestStockholmReader(unittest.TestCase):
         self.assertEqual(msa, exp)
 
     def test_stockholm_runon_gf(self):
-        fp = get_data_path('stockholm_runon_gf')
+        fp = get_data_path('stockholm_runon_gf_no_whitespace')
         msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
         exp = TabularMSA([DNA('ACTGGTTCAATG')],
                          metadata={'CC': 'CBS domains are small intracellular'
@@ -187,14 +196,20 @@ class TestStockholmReader(unittest.TestCase):
                                          'copies within a protein.'},
                          index=['GG1344'])
         self.assertEqual(msa, exp)
+        fp = get_data_path('stockholm_runon_gf_with_whitespace')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        self.assertEqual(msa, exp)
 
     def test_stockholm_runon_gs(self):
-        fp = get_data_path('stockholm_runon_gs')
+        fp = get_data_path('stockholm_runon_gs_no_whitespace')
         msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
         exp = TabularMSA([DNA('ATCGTTCAGTG',
-                              metadata={'AL': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'})],
+                              metadata={'LN': 'This is a runon GS line.'})],
                          index=['seq1'])
         self.assertEqual(msa, exp)
+        fp = get_data_path('stockholm_runon_gs_with_whitespace')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        self.assertEqual(msa, exp)
 
     def test_stockholm_metadata_only(self):
         fp = get_data_path('stockholm_metadata_only')
@@ -241,6 +256,105 @@ class TestStockholmReader(unittest.TestCase):
                                               'tree3': 'IJKL'}})
         self.assertEqual(msa, exp)
 
+    def test_stockhom_single_reference(self):
+        fp = get_data_path('stockholm_single_reference')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RM', '123456789'),
+                                          ('RT', 'A Title'),
+                                          ('RA', 'The Author'),
+                                          ('RL', 'A Location'),
+                                          ('RC', 'Comment')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_multiple_references(self):
+        fp = get_data_path('stockholm_multiple_references')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RM', '123456789'),
+                                          ('RT', 'Title 1'),
+                                          ('RA', 'Author 1'),
+                                          ('RL', 'Location 1'),
+                                          ('RC', 'Comment 1')]),
+                             OrderedDict([('RM', '987654321'),
+                                          ('RT', 'Title 2'),
+                                          ('RA', 'Author 2'),
+                                          ('RL', 'Location 2'),
+                                          ('RC', 'Comment 2')]),
+                             OrderedDict([('RM', '132465879'),
+                                          ('RT', 'Title 3'),
+                                          ('RA', 'Author 3'),
+                                          ('RL', 'Location 3'),
+                                          ('RC', 'Comment 3')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_runon_references(self):
+        fp = get_data_path('stockholm_runon_references')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RM', '123456789'),
+                                          ('RT', 'A Runon Title'),
+                                          ('RA', 'The Author'),
+                                          ('RL', 'A Location'),
+                                          ('RC', 'A Runon Comment')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_mixed_runon_references(self):
+        fp = get_data_path('stockholm_runon_references_mixed')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RC', 'A Runon Comment'),
+                                          ('RM', '123456789'),
+                                          ('RT', 'A Runon Title'),
+                                          ('RA', 'The Author'),
+                                          ('RL', 'A Location')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_to_msa_different_padding(self):
+        fp = get_data_path('stockholm_different_padding')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RC',
+                                           'A Runon Comment Without '
+                                           'Whitespace')]),
+                             OrderedDict([('RC',
+                                           'A Runon Comment With '
+                                           'Whitespace')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_handles_missing_reference_items(self):
+        fp = get_data_path('stockholm_missing_reference_items')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RT', 'A Title'),
+                                          ('RA', 'The Author')])]})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_multi_line_tree_no_id(self):
+        fp = get_data_path('stockholm_multi_line_tree_no_id')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA([], metadata={'NH': 'ABCDEFGH'})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_multiple_multi_line_trees(self):
+        fp = get_data_path('stockholm_multiple_multi_line_trees')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCDEFGH',
+                                              'tree2': 'IJKLMNOP'}})
+        self.assertEqual(msa, exp)
+
+    def test_stockholm_multi_line_tree_with_id(self):
+        fp = get_data_path('stockholm_multi_line_tree_with_id')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCDEFGH'}})
+        self.assertEqual(msa, exp)
+
     def test_multiple_msa_file(self):
         fp = get_data_path('stockholm_multiple_msa')
         msa = _stockholm_to_tabular_msa(fp, constructor=RNA)
@@ -272,134 +386,128 @@ class TestStockholmReader(unittest.TestCase):
 
     def test_stockholm_duplicate_tree_id_error(self):
         fp = get_data_path('stockholm_duplicate_tree_ids')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Tree.*tree1.*in file.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Tree.*tree1.*in file.'):
+            _stockholm_to_tabular_msa(fp, constructor=DNA)
+
+    def test_stockholm_missing_reference_number_error(self):
+        fp = get_data_path('stockholm_missing_rn_tag')
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    "Expected 'RN'.*'RL' tag."):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_nonexistent_gr_error(self):
         fp = get_data_path('stockholm_invalid_nonexistent_gr')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'GS or GR.*nonexistent sequence.*RL1355.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'GS or GR.*nonexistent sequence.*RL1355.'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_nonexistent_gs_error(self):
         fp = get_data_path('stockholm_invalid_nonexistent_gs')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'GS or GR.*nonexistent sequence.*AC14.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'GS or GR.*nonexistent sequence.*AC14.'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_duplicate_sequence_names_error(self):
         fp = get_data_path('stockholm_duplicate_sequence_names')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'duplicate sequence name.*ASR132.*supported'
-                                   ' by the reader.'):
+        with self.assertRaisesRegex(
+                StockholmFormatError,
+                'duplicate sequence name.*ASR132.*supported by the reader.'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_duplicate_gr_error(self):
         fp = get_data_path('stockholm_duplicate_gr')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Found duplicate GR.*OS.*LFDR3.*supported '
-                                   'by the reader.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Found duplicate GR.*OS.*LFDR3.*supported '
+                                    'by the reader.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_duplicate_gc_error(self):
         fp = get_data_path('stockholm_duplicate_gc')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Found duplicate GC.*SS_cons.*supported '
-                                   'by the reader.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Found duplicate GC.*SS_cons.*supported '
+                                    'by the reader.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_empty_file_error(self):
         fp = get_data_path('empty')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'File is empty.'):
+        with self.assertRaisesRegex(StockholmFormatError, 'File is empty.'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_missing_header_error(self):
         fp = get_data_path('stockholm_missing_header')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'File missing.*header'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'File missing.*header'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_missing_footer_error(self):
         fp = get_data_path('stockholm_missing_footer')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Final line.*only "//".'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Final line.*only "//".'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_data_type_error(self):
         fp = get_data_path('stockholm_invalid_data_type')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   "Unrecognized.*'#=GZ"):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    "Unrecognized.*'#=GZ"):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_malformed_gf_line_error(self):
         fp = get_data_path('stockholm_malformed_gf_line')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Line contains 2.*must contain.*3.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Line contains 2.*must contain.*3.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_malformed_gs_line_error(self):
         fp = get_data_path('stockholm_malformed_gs_line')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Line contains 3.*must contain.*4.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Line contains 3.*must contain.*4.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_malformed_gr_line_error(self):
         fp = get_data_path('stockholm_malformed_gr_line')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Line contains 2.*must contain.*4.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Line contains 2.*must contain.*4.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_malformed_gc_line_error(self):
         fp = get_data_path('stockholm_malformed_gc_line')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Line contains 2.*must contain.*3.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Line contains 2.*must contain.*3.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_malformed_data_line_error(self):
         fp = get_data_path('stockholm_malformed_data_line')
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Line contains 1.*must contain.*2.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Line contains 1.*must contain.*2.'):
             _stockholm_to_tabular_msa(fp, constructor=DNA)
 
     def test_differing_sequence_lengths_error(self):
         fp = get_data_path('stockholm_differing_seq_lengths')
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Each sequence.*11 != 10'):
+        with self.assertRaisesRegex(ValueError, 'Each sequence.*11 != 10'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_differing_data_lengths_gr_error(self):
         fp = get_data_path('stockholm_differing_gr_data_length')
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Number.*7.*(8).'):
+        with self.assertRaisesRegex(ValueError, 'Number.*7.*(8).'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_differing_data_lengths_gc_error(self):
         fp = get_data_path('stockholm_differing_gc_data_length')
-        with six.assertRaisesRegex(self, ValueError,
-                                   'Number.*12.*(10).'):
+        with self.assertRaisesRegex(ValueError, 'Number.*12.*(10).'):
             _stockholm_to_tabular_msa(fp, constructor=RNA)
 
     def test_no_constructor_error(self):
         fp = get_data_path('empty')
-        with six.assertRaisesRegex(self, ValueError, 'Must.*parameter.'):
+        with self.assertRaisesRegex(ValueError, 'Must provide.*parameter.'):
             _stockholm_to_tabular_msa(fp)
 
     def test_unsupported_constructor_error(self):
         fp = get_data_path('empty')
-        with six.assertRaisesRegex(self, TypeError,
-                                   '`constructor`.*`GrammaredSequence`'):
+        with self.assertRaisesRegex(TypeError,
+                                    '`constructor`.*`GrammaredSequence`.'):
             _stockholm_to_tabular_msa(fp, constructor=TabularMSA)
 
-    def test_handles_missing_metadata_efficiently(self):
-        msa = _stockholm_to_tabular_msa(get_data_path('stockholm_minimal'),
-                                        constructor=DNA)
-        self.assertIsNone(msa._metadata)
-        self.assertIsNone(msa._positional_metadata)
-        self.assertIsNone(msa[0]._metadata)
-        self.assertIsNone(msa[0]._positional_metadata)
-
 
 class TestStockholmWriter(unittest.TestCase):
     def test_msa_to_stockholm_extensive(self):
@@ -487,6 +595,50 @@ class TestStockholmWriter(unittest.TestCase):
             exp = fh.read()
         self.assertEqual(obs, exp)
 
+    def test_msa_to_stockholm_single_reference(self):
+        fp = get_data_path('stockholm_single_reference')
+        msa = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RM', '123456789'),
+                                          ('RT', 'A Title'),
+                                          ('RA', 'The Author'),
+                                          ('RL', 'A Location'),
+                                          ('RC', 'Comment')])]})
+        fh = io.StringIO()
+        _tabular_msa_to_stockholm(msa, fh)
+        obs = fh.getvalue()
+        fh.close()
+        with io.open(fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
+    def test_msa_to_stockholm_multiple_references(self):
+        fp = get_data_path('stockholm_multiple_references')
+        msa = TabularMSA(
+            [],
+            metadata={'RN': [OrderedDict([('RM', '123456789'),
+                                          ('RT', 'Title 1'),
+                                          ('RA', 'Author 1'),
+                                          ('RL', 'Location 1'),
+                                          ('RC', 'Comment 1')]),
+                             OrderedDict([('RM', '987654321'),
+                                          ('RT', 'Title 2'),
+                                          ('RA', 'Author 2'),
+                                          ('RL', 'Location 2'),
+                                          ('RC', 'Comment 2')]),
+                             OrderedDict([('RM', '132465879'),
+                                          ('RT', 'Title 3'),
+                                          ('RA', 'Author 3'),
+                                          ('RL', 'Location 3'),
+                                          ('RC', 'Comment 3')])]})
+        fh = io.StringIO()
+        _tabular_msa_to_stockholm(msa, fh)
+        obs = fh.getvalue()
+        fh.close()
+        with io.open(fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
     def test_msa_to_stockholm_data_only(self):
         fp = get_data_path('stockholm_data_only')
         msa = TabularMSA([RNA('ACUCCGACAUGCUCC'),
@@ -573,6 +725,39 @@ class TestStockholmWriter(unittest.TestCase):
             exp = fh.read()
         self.assertEqual(obs, exp)
 
+    def test_round_trip_single_reference(self):
+        fp = get_data_path('stockholm_single_reference')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        fh = io.StringIO()
+        _tabular_msa_to_stockholm(msa, fh)
+        obs = fh.getvalue()
+        fh.close()
+        with io.open(fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
+    def test_round_trip_multiple_references(self):
+        fp = get_data_path('stockholm_multiple_references')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        fh = io.StringIO()
+        _tabular_msa_to_stockholm(msa, fh)
+        obs = fh.getvalue()
+        fh.close()
+        with io.open(fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
+    def test_round_trip_missing_references(self):
+        fp = get_data_path('stockholm_missing_reference_items')
+        msa = _stockholm_to_tabular_msa(fp, constructor=DNA)
+        fh = io.StringIO()
+        _tabular_msa_to_stockholm(msa, fh)
+        obs = fh.getvalue()
+        fh.close()
+        with io.open(fp) as fh:
+            exp = fh.read()
+        self.assertEqual(obs, exp)
+
     def test_round_trip_data_only(self):
         fp = get_data_path('stockholm_data_only')
         msa = _stockholm_to_tabular_msa(fp, constructor=RNA)
@@ -606,20 +791,11 @@ class TestStockholmWriter(unittest.TestCase):
             exp = fh.read()
         self.assertEqual(obs, exp)
 
-    def test_handles_missing_metadata_efficiently(self):
-        msa = TabularMSA([DNA('ACTG'), DNA('GTCA')], index=['seq1', 'seq2'])
-        fh = io.StringIO()
-        _tabular_msa_to_stockholm(msa, fh)
-        self.assertIsNone(msa._metadata)
-        self.assertIsNone(msa._positional_metadata)
-        self.assertIsNone(msa[0]._metadata)
-        self.assertIsNone(msa[0]._positional_metadata)
-
     def test_unoriginal_index_error(self):
         msa = TabularMSA([DNA('ATCGCCAGCT'), DNA('TTGTGCTGGC')],
                          index=['seq1', 'seq1'])
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'index labels must be unique.'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'index labels must be unique.'):
             fh = io.StringIO()
             _tabular_msa_to_stockholm(msa, fh)
 
@@ -639,9 +815,9 @@ class TestStockholmWriter(unittest.TestCase):
         msa = TabularMSA([DNA('CGTCAATCTCGAACT',
                           positional_metadata=pos_metadata_dataframe)],
                          index=['seq1'])
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Sequence-specific positional metadata.*'
-                                   'must be unique. Found 1 duplicate'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Sequence-specific positional metadata.*'
+                                    'must be unique. Found 1 duplicate'):
             fh = io.StringIO()
             _tabular_msa_to_stockholm(msa, fh)
 
@@ -660,10 +836,10 @@ class TestStockholmWriter(unittest.TestCase):
                                                                 'TG'))])
         msa = TabularMSA([DNA('CCCCTGCTTTCGTAG')],
                          positional_metadata=pos_metadata_dataframe)
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Multiple sequence alignment positional '
-                                   'metadata.*must be unique. Found 2 '
-                                   'duplicate'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Multiple sequence alignment positional '
+                                    'metadata.*must be unique. Found 2 '
+                                    'duplicate'):
             fh = io.StringIO()
             _tabular_msa_to_stockholm(msa, fh)
 
@@ -675,10 +851,10 @@ class TestStockholmWriter(unittest.TestCase):
                                                'AS': list('TCAGCTCTGCAGCGTT')})
         msa = TabularMSA([DNA('TCCTTGAACTACCCGA',
                               positional_metadata=pos_metadata_dataframe)])
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Sequence-specific positional metadata.*'
-                                   'must contain a single character.*Found '
-                                   'value\(s\) in column AC'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Sequence-specific positional metadata.*'
+                                    'must contain a single character.*Found '
+                                    'value\(s\) in column AC'):
             fh = io.StringIO()
             _tabular_msa_to_stockholm(msa, fh)
 
@@ -690,12 +866,41 @@ class TestStockholmWriter(unittest.TestCase):
                                                'AS': list('TCAGCTCTGCAGCGTT')})
         msa = TabularMSA([DNA('TCCTTGAACTACCCGA')],
                          positional_metadata=pos_metadata_dataframe)
-        with six.assertRaisesRegex(self, StockholmFormatError,
-                                   'Multiple sequence alignment positional '
-                                   'metadata.*must contain a single character'
-                                   '.*Found value\(s\) in column AC'):
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    'Multiple sequence alignment positional '
+                                    'metadata.*must contain a single character'
+                                    '.*Found value\(s\) in column AC'):
+            fh = io.StringIO()
+            _tabular_msa_to_stockholm(msa, fh)
+
+    def test_rn_not_list_of_refs_error(self):
+        msa = TabularMSA([], metadata={'RN': '1'})
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    "Expected 'RN'.*list of reference"
+                                    ".*got '1'"):
             fh = io.StringIO()
             _tabular_msa_to_stockholm(msa, fh)
 
+    def test_rn_data_not_in_dict_error(self):
+        msa = TabularMSA([], metadata={'RN': [OrderedDict([('RL',
+                                                            'Flagstaff')]),
+                                              'Incorrect Item']})
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    "Expected reference information.*stored"
+                                    " as a dictionary, found.*2 stored as "
+                                    "'str'"):
+            fh = io.StringIO()
+            _tabular_msa_to_stockholm(msa, fh)
+
+    def test_invalid_reference_tag_error(self):
+        msa = TabularMSA([], metadata={'RN': [OrderedDict([('RL', 'Flagstaff'),
+                                                           ('foo', 'bar')])]})
+        with self.assertRaisesRegex(StockholmFormatError,
+                                    "Invalid reference.*foo' found in.*1.*Vali"
+                                    "d reference tags are:"):
+                fh = io.StringIO()
+                _tabular_msa_to_stockholm(msa, fh)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/skbio/io/registry.py b/skbio/io/registry.py
index 4578c38..2d1e731 100644
--- a/skbio/io/registry.py
+++ b/skbio/io/registry.py
@@ -167,8 +167,6 @@ The following are not yet used but should be avoided as well:
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from warnings import warn
 import types
 import traceback
@@ -176,8 +174,6 @@ import itertools
 import inspect
 from functools import wraps
 
-from future.builtins import zip
-
 from ._exception import DuplicateRegistrationError, InvalidRegistrationError
 from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
                FormatIdentificationWarning)
@@ -188,7 +184,7 @@ from skbio.util._decorator import stable, classonlymethod
 FileSentinel = make_sentinel("FileSentinel")
 
 
-class IORegistry(object):
+class IORegistry:
     """Create a registry of formats and implementations which map to classes.
 
     """
@@ -506,7 +502,13 @@ class IORegistry(object):
             # on the first call from __iter__
             # eta-reduction is possible, but we want to the type to be
             # GeneratorType
-            return (x for x in itertools.chain([next(gen)], gen))
+            try:
+                return (x for x in itertools.chain([next(gen)], gen))
+            except StopIteration:
+                # If the error was a StopIteration, then we want to return an
+                # empty generator as `next(gen)` failed.
+                # See #1313 for more info.
+                return (x for x in [])
         else:
             return self._read_ret(file, format, into, verify, kwargs)
 
@@ -526,8 +528,7 @@ class IORegistry(object):
         with _resolve_file(file, **io_kwargs) as (file, _, _):
             reader, kwargs = self._init_reader(file, fmt, into, verify, kwargs,
                                                io_kwargs)
-            for item in reader(file, **kwargs):
-                yield item
+            yield from reader(file, **kwargs)
 
     def _find_io_kwargs(self, kwargs):
         return {k: kwargs[k] for k in _open_kwargs if k in kwargs}
@@ -557,11 +558,25 @@ class IORegistry(object):
 
         reader = self.get_reader(fmt, into)
         if reader is None:
+            possible_intos = [r.__name__ for r in
+                              self._get_possible_readers(fmt)]
+            message = ''
+            if possible_intos:
+                message = ("Possible values for `into` include: %s"
+                           % ', '.join(possible_intos))
+            into_message = '`into` also not provided.' if not into else ''
             raise UnrecognizedFormatError(
-                "Cannot read %r from %r, no %s reader found." %
-                (fmt, file, into.__name__ if into else 'generator'))
+                "Cannot read %r from %r, no %s reader found. %s %s" %
+                (fmt, file, into.__name__ if into else 'generator',
+                 into_message, message))
         return reader, kwargs
 
+    def _get_possible_readers(self, fmt):
+        for lookup in self._lookups:
+            if fmt in lookup:
+                return list(lookup[fmt].readers)
+        return []
+
     @stable(as_of="0.4.0")
     def write(self, obj, format, into, **kwargs):
         """Write `obj` as `format` into a file.
@@ -754,7 +769,7 @@ skbio.io.util.open
 """
 
 
-class Format(object):
+class Format:
     """Defines a format on which readers/writers/sniffer can be registered.
 
     Parameters
@@ -792,7 +807,7 @@ class Format(object):
     @property
     @stable(as_of="0.4.0")
     def readers(self):
-        """Dictionary that maps classes to their writers for this format."""
+        """Dictionary that maps classes to their readers for this format."""
         return self._readers
 
     @property
@@ -950,7 +965,7 @@ class Format(object):
         >>> registry.add_format(myformat)
         >>> # If developing a new format for skbio, use the create_format()
         >>> # factory instead of the above.
-        >>> class MyObject(object):
+        >>> class MyObject:
         ...     def __init__(self, content):
         ...         self.content = content
         ...
@@ -990,8 +1005,7 @@ class Format(object):
                         file_params, file, encoding, newline, kwargs)
                     with open_files(files, mode='r', **io_kwargs) as fhs:
                         kwargs.update(zip(file_keys, fhs[:-1]))
-                        for item in reader_function(fhs[-1], **kwargs):
-                            yield item
+                        yield from reader_function(fhs[-1], **kwargs)
 
             self._add_reader(cls, wrapped_reader, monkey_patch, override)
             return wrapped_reader
@@ -1036,7 +1050,7 @@ class Format(object):
         >>> registry.add_format(myformat)
         >>> # If developing a new format for skbio, use the create_format()
         >>> # factory instead of the above.
-        >>> class MyObject(object):
+        >>> class MyObject:
         ...     default_write_format = 'myformat'
         ...     def __init__(self, content):
         ...         self.content = content
diff --git a/skbio/io/tests/__init__.py b/skbio/io/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/io/tests/__init__.py
+++ b/skbio/io/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/io/tests/test_iosources.py b/skbio/io/tests/test_iosources.py
index 50cbd29..116cafa 100644
--- a/skbio/io/tests/test_iosources.py
+++ b/skbio/io/tests/test_iosources.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import unittest
 
 from skbio.io._iosources import IOSource, Compressor
diff --git a/skbio/io/tests/test_registry.py b/skbio/io/tests/test_registry.py
index 20291ea..183121e 100644
--- a/skbio/io/tests/test_registry.py
+++ b/skbio/io/tests/test_registry.py
@@ -6,13 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-import six
-from six.moves import zip_longest
-
 from io import StringIO
 import io
+import itertools
 import os
 import unittest
 import warnings
@@ -30,7 +26,7 @@ from skbio.util._exception import TestingUtilError
 from skbio import DNA, read, write
 
 
-class TestClass(object):
+class TestClass:
     def __init__(self, l):
         self.list = l
 
@@ -558,7 +554,7 @@ class TestSniff(RegistryTest):
             return
 
     def test_no_matches(self):
-        fh = StringIO(u"no matches here")
+        fh = StringIO("no matches here")
         with self.assertRaises(UnrecognizedFormatError) as cm:
             self.registry.sniff(fh)
         self.assertTrue(str(fh) in str(cm.exception))
@@ -566,11 +562,11 @@ class TestSniff(RegistryTest):
         fh.close()
 
     def test_one_match(self):
-        fh = StringIO(u"contains a 3")
+        fh = StringIO("contains a 3")
         self.assertEqual('format3', self.registry.sniff(fh)[0])
 
     def test_many_matches(self):
-        fh = StringIO(u"1234 will match all")
+        fh = StringIO("1234 will match all")
         with self.assertRaises(UnrecognizedFormatError) as cm:
             self.registry.sniff(fh)
         self.assertTrue("format1" in str(cm.exception))
@@ -601,7 +597,7 @@ class TestSniff(RegistryTest):
         def sniffer(fh):
             return True, {}
 
-        with six.assertRaisesRegex(self, TypeError, '`newline`'):
+        with self.assertRaisesRegex(TypeError, '`newline`'):
             self.registry.sniff(fp, newline='\r')
 
     def test_non_default_encoding(self):
@@ -654,7 +650,7 @@ class TestSniff(RegistryTest):
         def sniffer(fh):
             return True, {}
 
-        fh = StringIO(u'a\nb\nc\nd\n')
+        fh = StringIO('a\nb\nc\nd\n')
         fh.seek(2)
         self.registry.sniff(fh)
         self.assertEqual('b\n', fh.readline())
@@ -730,7 +726,7 @@ class TestSniff(RegistryTest):
         self.assertEqual(fmt, 'binf')
 
         with self.assertRaises(ValueError):
-            self.registry.sniff([u'some content\n'], encoding='binary')
+            self.registry.sniff(['some content\n'], encoding='binary')
 
         with self.assertRaises(ValueError):
             binf_sniffer(self.fp1, encoding=None)
@@ -762,7 +758,7 @@ class TestSniff(RegistryTest):
         self.assertTrue(self._check_textf)
 
     def test_sniff_gzip(self):
-        expected = u"This is some content\nIt occurs on more than one line\n"
+        expected = "This is some content\nIt occurs on more than one line\n"
 
         formata = self.registry.create_format('formata', encoding='binary')
         formatb = self.registry.create_format('formatb')
@@ -811,7 +807,7 @@ class TestSniff(RegistryTest):
         self._check_binf = False
         self._check_textf = False
 
-        fmt, _ = self.registry.sniff([u'text'])
+        fmt, _ = self.registry.sniff(['text'])
         self.assertEqual(fmt, 'textf')
 
         self.assertFalse(self._check_binf)
@@ -861,7 +857,7 @@ class TestRead(RegistryTest):
     def test_format_is_none(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -876,16 +872,30 @@ class TestRead(RegistryTest):
         self.assertEqual(TestClass([1, 2, 3, 4]), instance)
         fh.close()
 
+    def test_into_is_none_and_no_generator_reader(self):
+        format1 = self.registry.create_format('format1')
+
+        fh = StringIO('1\n2\n3\n4')
+
+        @format1.reader(TestClass)
+        def reader(fh):
+            self.assertIsInstance(fh, io.TextIOBase)
+            return
+
+        with self.assertRaisesRegex(UnrecognizedFormatError,
+                                    "Cannot read 'format1'.*Possible.*include"
+                                    ": TestClass"):
+            self.registry.read(fh, format='format1')
+
     def test_into_is_none(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.reader(None)
         def reader(fh):
             self.assertIsInstance(fh, io.TextIOBase)
-            for value in [int(x) for x in fh.read().split('\n')]:
-                yield value
+            yield from [int(x) for x in fh.read().split('\n')]
 
         generator = self.registry.read(fh, format='format1')
         self.assertIsInstance(generator, types.GeneratorType)
@@ -910,11 +920,10 @@ class TestRead(RegistryTest):
         @format1.reader(None)
         def reader(fh):
             self._test_fh = fh
-            for value in [int(x) for x in fh.read().split('\n')]:
-                yield value
+            yield from [int(x) for x in fh.read().split('\n')]
 
         generator = self.registry.read(fp, format='format1')
-        for a, b in zip_longest(generator, [1, 2, 3, 4]):
+        for a, b in itertools.zip_longest(generator, [1, 2, 3, 4]):
             self.assertEqual(a, b)
         self.assertTrue(self._test_fh.closed)
 
@@ -935,7 +944,7 @@ class TestRead(RegistryTest):
     def test_reader_exists_with_verify_true(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -965,7 +974,7 @@ class TestRead(RegistryTest):
     def test_warning_raised(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -999,7 +1008,7 @@ class TestRead(RegistryTest):
     def test_reader_exists_with_verify_false(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1065,16 +1074,16 @@ class TestRead(RegistryTest):
             self.assertEqual(kwargs['arg3'], [1])
             return
 
-        self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1])
+        self.registry.read(StringIO('notempty'), into=TestClass, arg3=[1])
 
         with warnings.catch_warnings(record=True):
             warnings.simplefilter("error")
             # Should raise no warning and thus no error.
-            self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1],
+            self.registry.read(StringIO('notempty'), into=TestClass, arg3=[1],
                                override=30)
             # Should raise a warning and thus an error.
             with self.assertRaises(ArgumentOverrideWarning):
-                self.registry.read(StringIO(u'notempty'), into=TestClass,
+                self.registry.read(StringIO('notempty'), into=TestClass,
                                    arg3=[1], override=100)
 
     def test_that_encoding_is_used(self):
@@ -1084,7 +1093,7 @@ class TestRead(RegistryTest):
 
         @format1.sniffer()
         def sniffer(fh):
-            return u'\u4f60' in fh.readline(), {}
+            return '\u4f60' in fh.readline(), {}
 
         @format1.reader(TestClass)
         def reader(fh):
@@ -1098,11 +1107,11 @@ class TestRead(RegistryTest):
 
         self._expected_enc = 'big5'
         instance = self.registry.read(fp, into=TestClass, encoding='big5')
-        self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)
+        self.assertEqual(TestClass(['\u4f60\u597d\n']), instance)
 
         self._expected_enc = 'big5'
         gen = self.registry.read(fp, format='format1', encoding='big5')
-        self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
+        self.assertEqual(TestClass(['\u4f60\u597d\n']), next(gen))
 
     def test_non_default_encoding(self):
         format1 = self.registry.create_format('format1', encoding='big5')
@@ -1125,10 +1134,10 @@ class TestRead(RegistryTest):
 
         self._expected_enc = 'big5'
         instance = self.registry.read(fp, into=TestClass)
-        self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)
+        self.assertEqual(TestClass(['\u4f60\u597d\n']), instance)
 
         gen = self.registry.read(fp, format='format1')
-        self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
+        self.assertEqual(TestClass(['\u4f60\u597d\n']), next(gen))
         gen.close()
 
         self._expected_enc = 'utf8'
@@ -1155,10 +1164,10 @@ class TestRead(RegistryTest):
         def reader_gen(fh):
             yield TestClass(fh.readlines())
 
-        with six.assertRaisesRegex(self, TypeError, '`newline`'):
+        with self.assertRaisesRegex(TypeError, '`newline`'):
             self.registry.read(fp, into=TestClass, newline='\r')
 
-        with six.assertRaisesRegex(self, TypeError, '`newline`'):
+        with self.assertRaisesRegex(TypeError, '`newline`'):
             self.registry.read(fp, format='formatx', newline='\r')
 
     def test_non_default_newline(self):
@@ -1190,7 +1199,7 @@ class TestRead(RegistryTest):
 
         extra = get_data_path('real_file')
         extra_2 = get_data_path('real_file_2')
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1211,7 +1220,7 @@ class TestRead(RegistryTest):
     def test_file_sentinel_converted_to_none(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1231,7 +1240,7 @@ class TestRead(RegistryTest):
     def test_file_sentinel_pass_none(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1254,7 +1263,7 @@ class TestRead(RegistryTest):
 
         extra = get_data_path('real_file')
         extra_2 = get_data_path('real_file_2')
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1275,7 +1284,7 @@ class TestRead(RegistryTest):
     def test_file_sentinel_converted_to_none_generator(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1295,7 +1304,7 @@ class TestRead(RegistryTest):
     def test_file_sentinel_pass_none_generator(self):
         format1 = self.registry.create_format('format1')
 
-        fh = StringIO(u'1\n2\n3\n4')
+        fh = StringIO('1\n2\n3\n4')
 
         @format1.sniffer()
         def sniffer(fh):
@@ -1355,23 +1364,23 @@ class TestRead(RegistryTest):
         gen.close()
 
         with self.assertRaises(ValueError):
-            self.registry.read([u'some content\n'], encoding='binary',
+            self.registry.read(['some content\n'], encoding='binary',
                                into=TestClass)
 
         with self.assertRaises(ValueError):
-            self.registry.read([u'some content\n'], format='textf',
+            self.registry.read(['some content\n'], format='textf',
                                encoding='binary', into=TestClass)
 
         with self.assertRaises(ValueError):
-            self.registry.read([u'some content\n'], format='textf',
+            self.registry.read(['some content\n'], format='textf',
                                encoding='binary', verify=False, into=TestClass)
 
         with self.assertRaises(ValueError):
-            self.registry.read([u'some content\n'], format='textf',
+            self.registry.read(['some content\n'], format='textf',
                                encoding='binary')
 
         with self.assertRaises(ValueError):
-            self.registry.read([u'some content\n'], format='textf',
+            self.registry.read(['some content\n'], format='textf',
                                encoding='binary', verify=False)
 
         with self.assertRaises(ValueError):
@@ -1435,6 +1444,23 @@ class TestRead(RegistryTest):
         self.assertEqual(next(gen), TestClass(['woo']))
         gen.close()
 
+    def test_read_empty_file_gen_with_format(self):
+        format1 = self.registry.create_format('format1')
+
+        @format1.sniffer()
+        def sniffer(fh):
+            return True, {}
+
+        @format1.reader(None)
+        def reader1(fh):
+            return
+            yield
+
+        with io.StringIO("") as fh:
+            gen = self.registry.read(fh, format='format1')
+
+        self.assertEqual(list(gen), [])
+
 
 class TestWrite(RegistryTest):
     def test_writer_does_not_exist(self):
@@ -1455,7 +1481,7 @@ class TestWrite(RegistryTest):
         @format1.writer(TestClass)
         def writer(obj, fh):
             self.assertIsInstance(fh, io.TextIOBase)
-            fh.write(u'\n'.join(obj.list))
+            fh.write('\n'.join(obj.list))
 
         self.registry.write(obj, format='format1', into=fh)
         fh.seek(0)
@@ -1471,12 +1497,12 @@ class TestWrite(RegistryTest):
         @format1.writer(TestClass)
         def writer(obj, fh):
             self.assertIsInstance(fh, io.TextIOBase)
-            fh.write(u'\n'.join(obj.list))
+            fh.write('\n'.join(obj.list))
 
         self.registry.write(obj, format='format1', into=fp)
 
         with io.open(fp) as fh:
-            self.assertEqual(u"1\n2\n3\n4", fh.read())
+            self.assertEqual("1\n2\n3\n4", fh.read())
 
     def test_writer_passed_kwargs(self):
         format1 = self.registry.create_format('format1')
@@ -1496,12 +1522,12 @@ class TestWrite(RegistryTest):
     def test_that_encoding_is_used(self):
         format1 = self.registry.create_format('format1')
 
-        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
+        obj = TestClass(['\u4f60\u597d\n'])  # Ni Hau
         fp = self.fp1
 
         @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write(u''.join(obj.list))
+            fh.write(''.join(obj.list))
             self.assertEqual(self._expected_encoding, fh.encoding)
 
         self._expected_encoding = 'big5'
@@ -1514,12 +1540,12 @@ class TestWrite(RegistryTest):
     def test_non_default_encoding(self):
         format1 = self.registry.create_format('format1', encoding='big5')
 
-        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
+        obj = TestClass(['\u4f60\u597d\n'])  # Ni Hau
         fp = self.fp1
 
         @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write(u''.join(obj.list))
+            fh.write(''.join(obj.list))
             self.assertEqual(self._expected_encoding, fh.encoding)
 
         self._expected_encoding = 'big5'
@@ -1537,12 +1563,12 @@ class TestWrite(RegistryTest):
     def test_that_newline_is_used(self):
         format1 = self.registry.create_format('format1')
 
-        obj = TestClass([u'a\n', u'b\n', u'c\n'])
+        obj = TestClass(['a\n', 'b\n', 'c\n'])
         fp = self.fp1
 
         @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write(u''.join(obj.list))
+            fh.write(''.join(obj.list))
 
         self.registry.write(obj, format='format1', into=fp, newline='\r')
 
@@ -1552,12 +1578,12 @@ class TestWrite(RegistryTest):
     def test_non_default_newline(self):
         format1 = self.registry.create_format('format1', newline='\r')
 
-        obj = TestClass([u'a\n', u'b\n', u'c\n'])
+        obj = TestClass(['a\n', 'b\n', 'c\n'])
         fp = self.fp1
 
         @format1.writer(TestClass)
         def writer(obj, fh):
-            fh.write(u''.join(obj.list))
+            fh.write(''.join(obj.list))
 
         self.registry.write(obj, format='format1', into=fp)
 
@@ -1576,8 +1602,8 @@ class TestWrite(RegistryTest):
 
         @format1.writer(TestClass)
         def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
-            extra.write(u'oh yeah...')
-            extra_2.write(u'oh no...')
+            extra.write('oh yeah...')
+            extra_2.write('oh no...')
 
         self.registry.write(TestClass([]), format='format1', into=fh,
                             extra=self.fp1, extra_2=self.fp2)
@@ -1657,7 +1683,7 @@ class TestWrite(RegistryTest):
     def test_io_kwargs_passed(self):
         format1 = self.registry.create_format('format1', encoding='ascii')
 
-        obj = TestClass([u'a\n', u'b\n', u'c\n'])
+        obj = TestClass(['a\n', 'b\n', 'c\n'])
         fp = self.fp1
         f = io.BytesIO()
 
@@ -1685,13 +1711,13 @@ class TestMonkeyPatch(RegistryTest):
     def setUp(self):
         super(TestMonkeyPatch, self).setUp()
 
-        class UnassumingClass(object):
+        class UnassumingClass:
             pass
 
-        class ClassWithDefault(object):
+        class ClassWithDefault:
             default_write_format = 'favfmt'
 
-        class NoMonkeySee(object):
+        class NoMonkeySee:
             pass
 
         self.unassuming_class = UnassumingClass
@@ -1858,7 +1884,7 @@ class TestMonkeyPatch(RegistryTest):
             self.was_called = True
 
         self.registry.monkey_patch()
-        fh = StringIO(u'notempty')
+        fh = StringIO('notempty')
         self.class_with_default.read(fh, a='a', b=123)
 
         self.assertTrue(self.was_called)
@@ -1885,26 +1911,26 @@ class TestMonkeyPatch(RegistryTest):
 class TestModuleFunctions(unittest.TestCase):
 
     def test_sniff_matches(self):
-        exp = io_registry.sniff([u'(a, b);'])
-        result = sniff([u'(a, b);'])
+        exp = io_registry.sniff(['(a, b);'])
+        result = sniff(['(a, b);'])
         self.assertEqual(exp, result)
         self.assertEqual('newick', exp[0])
         self.assertEqual({}, exp[1])
 
     def test_read_matches(self):
-        input = [u'>\n', u'ACGT\n']
+        input = ['>\n', 'ACGT\n']
         exp = io_registry.read(input, into=DNA)
         result = read(input, into=DNA)
         self.assertEqual(exp, result)
-        self.assertEqual(exp, DNA('ACGT', metadata={u'id': u'',
-                                                    u'description': u''}))
+        self.assertEqual(exp, DNA('ACGT', metadata={'id': '',
+                                                    'description': ''}))
 
     def test_write_matches(self):
         input = DNA('ACGT')
         exp = io_registry.write(input, format='fasta', into=[])
         result = write(input, format='fasta', into=[])
         self.assertEqual(exp, result)
-        self.assertEqual(exp, [u'>\n', u'ACGT\n'])
+        self.assertEqual(exp, ['>\n', 'ACGT\n'])
 
     def test_create_format_matches(self):
         with self.assertRaises(DuplicateRegistrationError):
diff --git a/skbio/io/tests/test_util.py b/skbio/io/tests/test_util.py
index 70c2d06..86ae2dc 100644
--- a/skbio/io/tests/test_util.py
+++ b/skbio/io/tests/test_util.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import unittest
 import tempfile
 import shutil
@@ -37,21 +34,21 @@ class TestOpen(unittest.TestCase):
 
     def test_open_invalid_source_compression(self):
         with self.assertRaises(ValueError):
-            skbio.io.open([u'foo'], compression='gzip')
+            skbio.io.open(['foo'], compression='gzip')
 
     def test_open_invalid_source_encoding(self):
         with self.assertRaises(ValueError):
-            skbio.io.open([u'foo'], encoding='binary')
+            skbio.io.open(['foo'], encoding='binary')
 
         with self.assertRaises(ValueError):
-            skbio.io.open([u'foo'], encoding='binary', newline='\r')
+            skbio.io.open(['foo'], encoding='binary', newline='\r')
 
     def test_open_invalid_compression(self):
         with self.assertRaises(ValueError):
             skbio.io.open(io.BytesIO(), compression='foo')
 
 
-class ReadableBinarySourceTests(object):
+class ReadableBinarySourceTests:
     def check_closed(self, file, expected):
         if hasattr(file, 'closed'):
             self.assertEqual(file.closed, expected)
@@ -284,7 +281,7 @@ class ReadableSourceTest(unittest.TestCase):
 
         self.binary_contents = (b"This is some content\n"
                                 b"It occurs on more than one line\n")
-        self.decoded_contents = u'\u4f60\u597d\n'  # Ni Hau
+        self.decoded_contents = '\u4f60\u597d\n'  # Ni Hau
         self.compression = 'gzip'
         self.encoding = "big5"
 
@@ -301,7 +298,7 @@ class ReadableSourceTest(unittest.TestCase):
             f.close()
 
 
-class WritableBinarySourceTests(object):
+class WritableBinarySourceTests:
     def check_closed(self, file, expected):
         if hasattr(file, 'closed'):
             self.assertEqual(file.closed, expected)
@@ -525,35 +522,77 @@ class TestWriteBufferedReader(WritableBinarySourceTests, WritableSourceTest):
             return f.read()
 
 
+class TestReadNamedTemporaryFile(ReadableBinarySourceTests,
+                                 ReadableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        fileobj = tempfile.NamedTemporaryFile()
+        with io.open(path, mode='rb') as fh:
+            fileobj.write(fh.read())
+            fileobj.flush()
+            fileobj.seek(0)
+        return fileobj
+
+
+class TestWriteNamedTemporaryFile(WritableBinarySourceTests,
+                                  WritableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        return tempfile.NamedTemporaryFile()
+
+    def get_contents(self, file):
+        file.flush()
+        file.seek(0)
+        contents = file.read()
+        file.close()
+        return contents
+
+
+class TestReadTemporaryFile(ReadableBinarySourceTests, ReadableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        fileobj = tempfile.TemporaryFile()
+        with io.open(path, mode='rb') as fh:
+            fileobj.write(fh.read())
+            fileobj.flush()
+            fileobj.seek(0)
+        return fileobj
+
+
+class TestWriteTemporaryFile(WritableBinarySourceTests, WritableSourceTest):
+    expected_close = False
+
+    def get_fileobj(self, path):
+        return tempfile.TemporaryFile()
+
+    def get_contents(self, file):
+        file.flush()
+        file.seek(0)
+        contents = file.read()
+        file.close()
+        return contents
+
+
 class TestIterableReaderWriter(unittest.TestCase):
     def test_open(self):
         def gen():
-            yield u'a'
-            yield u'b'
-            yield u'c'
+            yield from ('a', 'b', 'c')
         list_ = list(gen())
 
         for input_ in gen(), list_:
             with skbio.io.open(input_) as result:
                 self.assertIsInstance(result, io.TextIOBase)
-                self.assertEqual(result.read(), u'abc')
+                self.assertEqual(result.read(), 'abc')
 
     def test_open_with_newline(self):
-        l = [u'a\r', u'b\r', u'c\r']
+        l = ['a\r', 'b\r', 'c\r']
         with skbio.io.open(l, newline='\r') as result:
             self.assertIsInstance(result, io.TextIOBase)
             self.assertEqual(result.readlines(), l)
 
-    def test_open_invalid_iterable_missing_u(self):
-        is_py2 = six.PY2
-        six.PY2 = True
-        try:
-            with six.assertRaisesRegex(self, skbio.io.IOSourceError,
-                                       ".*Prepend.*`u`.*"):
-                skbio.io.open([b'abc'])
-        finally:
-            six.PY2 = is_py2
-
     def test_open_invalid_iterable(self):
         with self.assertRaises(skbio.io.IOSourceError):
             skbio.io.open([1, 2, 3])
@@ -561,18 +600,18 @@ class TestIterableReaderWriter(unittest.TestCase):
     def test_open_empty_iterable(self):
         with skbio.io.open([]) as result:
             self.assertIsInstance(result, io.TextIOBase)
-            self.assertEqual(result.read(), u'')
+            self.assertEqual(result.read(), '')
 
     def test_open_write_mode(self):
         l = []
         with skbio.io.open(l, mode='w') as fh:
-            fh.write(u'abc')
-        self.assertEqual(l, [u'abc'])
+            fh.write('abc')
+        self.assertEqual(l, ['abc'])
 
         l = []
         with skbio.io.open(l, mode='w', newline='\r') as fh:
-            fh.write(u'ab\nc\n')
-        self.assertEqual(l, [u'ab\r', u'c\r'])
+            fh.write('ab\nc\n')
+        self.assertEqual(l, ['ab\r', 'c\r'])
 
         self.assertTrue(fh.closed)
         fh.close()
diff --git a/skbio/io/util.py b/skbio/io/util.py
index 2a35f9b..5799938 100644
--- a/skbio/io/util.py
+++ b/skbio/io/util.py
@@ -27,10 +27,8 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import io
-from contextlib2 import contextmanager, ExitStack
+from contextlib import contextmanager, ExitStack
 
 from skbio.io import IOSourceError
 from skbio.io._iosources import get_io_sources, get_compression_handler
@@ -77,37 +75,38 @@ def open(file, mode=_d['mode'], encoding=_d['encoding'], errors=_d['errors'],
 
     Supported inputs:
 
-    +----------------------------+----------+-----------+-------------+
-    | type                       | can read | can write | source type |
-    +============================+==========+===========+=============+
-    | file path                  | True     | True      | Binary      |
-    +----------------------------+----------+-----------+-------------+
-    | URL                        | True     | False     | Binary      |
-    +----------------------------+----------+-----------+-------------+
-    | ``[u"lines list\n"]``      | True     | True      | Text        |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.StringIO`       | True     | True      | Text        |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.BytesIO`        | True     | True      | Binary      |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.TextIOWrapper`  | True     | True      | Text        |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.BufferedReader` | True     | False     | Binary      |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.BufferedWriter` | False    | True      | Binary      |
-    +----------------------------+----------+-----------+-------------+
-    | :class:`io.BufferedRandom` | True     | True      | Binary      |
-    +----------------------------+----------+-----------+-------------+
-
-    .. note:: Filehandles opened with ``open`` in Python 2 are **not**
-       supported. Use ``io.open`` if you need to pass a filehandle.
+    +--------------------------------------+--------+---------+-----------+
+    | type                                 | can \  | can \   | source \  |
+    |                                      | read   | write   | type      |
+    +======================================+========+=========+===========+
+    | file path                            | True   | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | URL                                  | True   | False   | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | ``["lines list\n"]``                 | True   | True    | Text      |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.StringIO`                 | True   | True    | Text      |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.BytesIO`                  | True   | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.TextIOWrapper`            | True   | True    | Text      |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.BufferedReader`           | True   | False   | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.BufferedWriter`           | False  | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | :class:`io.BufferedRandom`           | True   | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | :func:`tempfile.TemporaryFile`       | True   | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
+    | :func:`tempfile.NamedTemporaryFile`  | True   | True    | Binary    |
+    +--------------------------------------+--------+---------+-----------+
 
     .. note:: When reading a list of unicode (str) lines, the input for
        `newline` is used to determine the number of lines in the resulting file
        handle, not the number of elements in the list. This is to allow
        composition with ``file.readlines()``.
 
-
     Parameters
     ----------
     file : filepath, url, filehandle, list
@@ -115,7 +114,7 @@ def open(file, mode=_d['mode'], encoding=_d['encoding'], errors=_d['errors'],
     mode : {'r', 'w'}, optional
         Whether to return a readable or writable file. Conversely, this does
         not imply that the returned file will be unwritable or unreadable.
-        To geta binary filehandle set `encoding` to binary.
+        To get a binary filehandle set `encoding` to binary.
     encoding : str, optional
         The encoding scheme to use for the file. If set to 'binary', no bytes
         will be translated. Otherwise this matches the behavior of
diff --git a/skbio/io/format/__init__.py b/skbio/metadata/__init__.py
similarity index 70%
copy from skbio/io/format/__init__.py
copy to skbio/metadata/__init__.py
index f85db28..6db261d 100644
--- a/skbio/io/format/__init__.py
+++ b/skbio/metadata/__init__.py
@@ -1,3 +1,12 @@
+r"""
+Metadata (:mod:`skbio.metadata`)
+================================
+
+.. currentmodule:: skbio.metadata
+
+This module provides classes for storing and working with metadata.
+"""
+
 # ----------------------------------------------------------------------------
 # Copyright (c) 2013--, scikit-bio development team.
 #
@@ -6,8 +15,5 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
-
 test = TestRunner(__file__).test
diff --git a/skbio/metadata/_mixin.py b/skbio/metadata/_mixin.py
new file mode 100644
index 0000000..a6f5648
--- /dev/null
+++ b/skbio/metadata/_mixin.py
@@ -0,0 +1,408 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import abc
+import copy
+
+import pandas as pd
+
+from skbio.util._decorator import stable
+
+
+class MetadataMixin(metaclass=abc.ABCMeta):
+    @property
+    @stable(as_of="0.4.0")
+    def metadata(self):
+        """``dict`` containing metadata which applies to the entire object.
+
+        Notes
+        -----
+        This property can be set and deleted. When setting new metadata a
+        shallow copy of the dictionary is made.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with metadata share a common interface for
+           accessing and manipulating their metadata. The following examples
+           use scikit-bio's ``Sequence`` class to demonstrate metadata
+           behavior. These examples apply to all other scikit-bio objects
+           storing metadata.
+
+        Create a sequence with metadata:
+
+        >>> from pprint import pprint
+        >>> from skbio import Sequence
+        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id',
+        ...                                  'description': 'seq description'})
+
+        Retrieve metadata:
+
+        >>> pprint(seq.metadata) # using pprint to display dict in sorted order
+        {'description': 'seq description', 'id': 'seq-id'}
+
+        Update metadata:
+
+        >>> seq.metadata['id'] = 'new-id'
+        >>> seq.metadata['pubmed'] = 12345
+        >>> pprint(seq.metadata)
+        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
+
+        Set metadata:
+
+        >>> seq.metadata = {'abc': 123}
+        >>> seq.metadata
+        {'abc': 123}
+
+        Delete metadata:
+
+        >>> seq.has_metadata()
+        True
+        >>> del seq.metadata
+        >>> seq.metadata
+        {}
+        >>> seq.has_metadata()
+        False
+
+        """
+        if self._metadata is None:
+            # Not using setter to avoid copy.
+            self._metadata = {}
+        return self._metadata
+
+    @metadata.setter
+    def metadata(self, metadata):
+        if not isinstance(metadata, dict):
+            raise TypeError("metadata must be a dict, not type %r" %
+                            type(metadata).__name__)
+        # Shallow copy.
+        self._metadata = metadata.copy()
+
+    @metadata.deleter
+    def metadata(self):
+        self._metadata = None
+
+    @abc.abstractmethod
+    def __init__(self, metadata=None):
+        raise NotImplementedError
+
+    def _init_(self, metadata=None):
+        if metadata is None:
+            # Could use deleter but this is less overhead and needs to be fast.
+            self._metadata = None
+        else:
+            # Use setter for validation and copy.
+            self.metadata = metadata
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        raise NotImplementedError
+
+    def _eq_(self, other):
+        # We're not simply comparing self.metadata to other.metadata in order
+        # to avoid creating "empty" metadata representations on the objects if
+        # they don't have metadata.
+        if self.has_metadata() and other.has_metadata():
+            return self.metadata == other.metadata
+        elif not (self.has_metadata() or other.has_metadata()):
+            # Both don't have metadata.
+            return True
+        else:
+            # One has metadata while the other does not.
+            return False
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        raise NotImplementedError
+
+    def _ne_(self, other):
+        return not (self == other)
+
+    @abc.abstractmethod
+    def __copy__(self):
+        raise NotImplementedError
+
+    def _copy_(self):
+        if self.has_metadata():
+            return self.metadata.copy()
+        else:
+            return None
+
+    @abc.abstractmethod
+    def __deepcopy__(self, memo):
+        raise NotImplementedError
+
+    def _deepcopy_(self, memo):
+        if self.has_metadata():
+            return copy.deepcopy(self.metadata, memo)
+        else:
+            return None
+
+    @stable(as_of="0.4.0")
+    def has_metadata(self):
+        """Determine if the object has metadata.
+
+        An object has metadata if its ``metadata`` dictionary is not empty
+        (i.e., has at least one key-value pair).
+
+        Returns
+        -------
+        bool
+            Indicates whether the object has metadata.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with metadata share a common interface for
+           accessing and manipulating their metadata. The following examples
+           use scikit-bio's ``Sequence`` class to demonstrate metadata
+           behavior. These examples apply to all other scikit-bio objects
+           storing metadata.
+
+        >>> from skbio import Sequence
+        >>> seq = Sequence('ACGT')
+        >>> seq.has_metadata()
+        False
+        >>> seq = Sequence('ACGT', metadata={})
+        >>> seq.has_metadata()
+        False
+        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
+        >>> seq.has_metadata()
+        True
+
+        """
+        return self._metadata is not None and bool(self.metadata)
+
+
+class PositionalMetadataMixin(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def _positional_metadata_axis_len_(self):
+        """Return length of axis that positional metadata applies to.
+
+        Returns
+        -------
+        int
+            Positional metadata axis length.
+
+        """
+        raise NotImplementedError
+
+    @property
+    @stable(as_of="0.4.0")
+    def positional_metadata(self):
+        """``pd.DataFrame`` containing metadata along an axis.
+
+        Notes
+        -----
+        This property can be set and deleted. When setting new positional
+        metadata, a shallow copy is made and the ``pd.DataFrame`` index is set
+        to ``pd.RangeIndex(start=0, stop=axis_len, step=1)``.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with positional metadata share a common
+           interface for accessing and manipulating their positional metadata.
+           The following examples use scikit-bio's ``DNA`` class to demonstrate
+           positional metadata behavior. These examples apply to all other
+           scikit-bio objects storing positional metadata.
+
+        Create a DNA sequence with positional metadata:
+
+        >>> from skbio import DNA
+        >>> seq = DNA(
+        ...     'ACGT',
+        ...     positional_metadata={'quality': [3, 3, 20, 11],
+        ...                          'exons': [True, True, False, True]})
+        >>> seq
+        DNA
+        -----------------------------
+        Positional metadata:
+            'exons': <dtype: bool>
+            'quality': <dtype: int64>
+        Stats:
+            length: 4
+            has gaps: False
+            has degenerates: False
+            has definites: True
+            GC-content: 50.00%
+        -----------------------------
+        0 ACGT
+
+        Retrieve positional metadata:
+
+        >>> seq.positional_metadata
+           exons  quality
+        0   True        3
+        1   True        3
+        2  False       20
+        3   True       11
+
+        Update positional metadata:
+
+        >>> seq.positional_metadata['gaps'] = seq.gaps()
+        >>> seq.positional_metadata
+           exons  quality   gaps
+        0   True        3  False
+        1   True        3  False
+        2  False       20  False
+        3   True       11  False
+
+        Set positional metadata:
+
+        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
+        >>> seq.positional_metadata
+          degenerates
+        0       False
+        1       False
+        2       False
+        3       False
+
+        Delete positional metadata:
+
+        >>> seq.has_positional_metadata()
+        True
+        >>> del seq.positional_metadata
+        >>> seq.positional_metadata
+        Empty DataFrame
+        Columns: []
+        Index: [0, 1, 2, 3]
+        >>> seq.has_positional_metadata()
+        False
+
+        """
+        if self._positional_metadata is None:
+            # Not using setter to avoid copy.
+            self._positional_metadata = pd.DataFrame(
+                index=self._get_positional_metadata_index())
+        return self._positional_metadata
+
+    @positional_metadata.setter
+    def positional_metadata(self, positional_metadata):
+        try:
+            # Pass copy=True to copy underlying data buffer.
+            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
+        except pd.core.common.PandasError as e:
+            raise TypeError(
+                "Invalid positional metadata. Must be consumable by "
+                "`pd.DataFrame` constructor. Original pandas error message: "
+                "\"%s\"" % e)
+
+        num_rows = len(positional_metadata.index)
+        axis_len = self._positional_metadata_axis_len_()
+        if num_rows != axis_len:
+            raise ValueError(
+                "Number of positional metadata values (%d) must match the "
+                "positional metadata axis length (%d)."
+                % (num_rows, axis_len))
+
+        positional_metadata.index = self._get_positional_metadata_index()
+        self._positional_metadata = positional_metadata
+
+    @positional_metadata.deleter
+    def positional_metadata(self):
+        self._positional_metadata = None
+
+    def _get_positional_metadata_index(self):
+        """Create a memory-efficient integer index for positional metadata."""
+        return pd.RangeIndex(start=0,
+                             stop=self._positional_metadata_axis_len_(),
+                             step=1)
+
+    @abc.abstractmethod
+    def __init__(self, positional_metadata=None):
+        raise NotImplementedError
+
+    def _init_(self, positional_metadata=None):
+        if positional_metadata is None:
+            # Could use deleter but this is less overhead and needs to be fast.
+            self._positional_metadata = None
+        else:
+            # Use setter for validation and copy.
+            self.positional_metadata = positional_metadata
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        raise NotImplementedError
+
+    def _eq_(self, other):
+        # We're not simply comparing self.positional_metadata to
+        # other.positional_metadata in order to avoid creating "empty"
+        # positional metadata representations on the objects if they don't have
+        # positional metadata.
+        if self.has_positional_metadata() and other.has_positional_metadata():
+            return self.positional_metadata.equals(other.positional_metadata)
+        elif not (self.has_positional_metadata() or
+                  other.has_positional_metadata()):
+            # Both don't have positional metadata.
+            return (self._positional_metadata_axis_len_() ==
+                    other._positional_metadata_axis_len_())
+        else:
+            # One has positional metadata while the other does not.
+            return False
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        raise NotImplementedError
+
+    def _ne_(self, other):
+        return not (self == other)
+
+    @abc.abstractmethod
+    def __copy__(self):
+        raise NotImplementedError
+
+    def _copy_(self):
+        if self.has_positional_metadata():
+            # deep=True makes a shallow copy of the underlying data buffer.
+            return self.positional_metadata.copy(deep=True)
+        else:
+            return None
+
+    @abc.abstractmethod
+    def __deepcopy__(self, memo):
+        raise NotImplementedError
+
+    def _deepcopy_(self, memo):
+        if self.has_positional_metadata():
+            return copy.deepcopy(self.positional_metadata, memo)
+        else:
+            return None
+
+    @stable(as_of="0.4.0")
+    def has_positional_metadata(self):
+        """Determine if the object has positional metadata.
+
+        An object has positional metadata if its ``positional_metadata``
+        ``pd.DataFrame`` has at least one column.
+
+        Returns
+        -------
+        bool
+            Indicates whether the object has positional metadata.
+
+        Examples
+        --------
+        .. note:: scikit-bio objects with positional metadata share a common
+           interface for accessing and manipulating their positional metadata.
+           The following examples use scikit-bio's ``DNA`` class to demonstrate
+           positional metadata behavior. These examples apply to all other
+           scikit-bio objects storing positional metadata.
+
+        >>> import pandas as pd
+        >>> from skbio import DNA
+        >>> seq = DNA('ACGT')
+        >>> seq.has_positional_metadata()
+        False
+        >>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
+        >>> seq.has_positional_metadata()
+        False
+        >>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
+        >>> seq.has_positional_metadata()
+        True
+
+        """
+        return (self._positional_metadata is not None and
+                len(self.positional_metadata.columns) > 0)
diff --git a/skbio/util/_metadata_repr.py b/skbio/metadata/_repr.py
similarity index 87%
rename from skbio/util/_metadata_repr.py
rename to skbio/metadata/_repr.py
index eacab93..7830e23 100644
--- a/skbio/util/_metadata_repr.py
+++ b/skbio/metadata/_repr.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import with_metaclass
-
-import six
 import itertools
 import numbers
 import textwrap
@@ -18,7 +14,7 @@ from abc import ABCMeta, abstractmethod
 from skbio._base import ElasticLines
 
 
-class _MetadataReprBuilder(with_metaclass(ABCMeta, object)):
+class _MetadataReprBuilder(metaclass=ABCMeta):
     """Abstract base class for building  a repr for an object containing
     metadata and/or positional metadata.
 
@@ -39,10 +35,12 @@ class _MetadataReprBuilder(with_metaclass(ABCMeta, object)):
     @abstractmethod
     def _process_header(self):
         """Used by `build` Template Method to build header for the repr"""
+        raise NotImplementedError
 
     @abstractmethod
     def _process_data(self):
         """Used by `build` Template Method to build data lines for the repr"""
+        raise NotImplementedError
 
     def build(self):
         """Template method for building the repr"""
@@ -84,18 +82,20 @@ class _MetadataReprBuilder(with_metaclass(ABCMeta, object)):
         key_fmt = self._format_key(key)
 
         supported_type = True
-        if isinstance(value, (six.text_type, six.binary_type)):
-            # for stringy values, there may be u'' or b'' depending on the type
-            # of `value` and version of Python. find the starting quote
-            # character so that wrapped text will line up with that instead of
-            # the string literal prefix character. for example:
+        if isinstance(value, str):
+            # extra indent of 1 so that wrapped text lines up:
             #
-            #     'foo': u'abc def ghi
-            #              jkl mno'
+            #     'foo': 'abc def ghi
+            #             jkl mno'
             value_repr = repr(value)
             extra_indent = 1
-            if not (value_repr.startswith("'") or value_repr.startswith('"')):
-                extra_indent = 2
+        elif isinstance(value, bytes):
+            # extra indent of 2 so that wrapped text lines up:
+            #
+            #     'foo': b'abc def ghi
+            #              jkl mno'
+            value_repr = repr(value)
+            extra_indent = 2
         # handles any number, this includes bool
         elif value is None or isinstance(value, numbers.Number):
             value_repr = repr(value)
@@ -135,8 +135,7 @@ class _MetadataReprBuilder(with_metaclass(ABCMeta, object)):
 
         """
         key_fmt = self._indent + repr(key)
-        supported_types = (six.text_type, six.binary_type, numbers.Number,
-                           type(None))
+        supported_types = (str, bytes, numbers.Number, type(None))
         if len(key_fmt) > (self._width / 2) or not isinstance(key,
                                                               supported_types):
             key_fmt = self._indent + str(type(key))
diff --git a/skbio/util/_testing.py b/skbio/metadata/_testing.py
similarity index 64%
copy from skbio/util/_testing.py
copy to skbio/metadata/_testing.py
index 92c111c..9787957 100644
--- a/skbio/util/_testing.py
+++ b/skbio/metadata/_testing.py
@@ -6,74 +6,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import PY3
-
 import copy
-import os
-import inspect
-import warnings
 
-import six
 import pandas as pd
-import nose
-
 import numpy as np
 import numpy.testing as npt
-import pandas.util.testing as pdt
-
-from skbio.util import SkbioWarning
-from ._decorator import experimental
-
-
-class ReallyEqualMixin(object):
-    """Use this for testing __eq__/__ne__.
-
-    Taken and modified from the following public domain code:
-      https://ludios.org/testing-your-eq-ne-cmp/
-
-    """
-
-    def assertReallyEqual(self, a, b):
-        # assertEqual first, because it will have a good message if the
-        # assertion fails.
-        self.assertEqual(a, b)
-        self.assertEqual(b, a)
-        self.assertTrue(a == b)
-        self.assertTrue(b == a)
-        self.assertFalse(a != b)
-        self.assertFalse(b != a)
-
-        # We do not support cmp/__cmp__ because they do not exist in Python 3.
-        # However, we still test this to catch potential bugs where the
-        # object's parent class defines a __cmp__.
-        if not PY3:
-            self.assertEqual(0, cmp(a, b))  # noqa
-            self.assertEqual(0, cmp(b, a))  # noqa
-
-    def assertReallyNotEqual(self, a, b):
-        # assertNotEqual first, because it will have a good message if the
-        # assertion fails.
-        self.assertNotEqual(a, b)
-        self.assertNotEqual(b, a)
-        self.assertFalse(a == b)
-        self.assertFalse(b == a)
-        self.assertTrue(a != b)
-        self.assertTrue(b != a)
-
-        # We do not support cmp/__cmp__ because they do not exist in Python 3.
-        # However, we still test this to catch potential bugs where the
-        # object's parent class defines a __cmp__.
-        if not PY3:
-            self.assertNotEqual(0, cmp(a, b))  # noqa
-            self.assertNotEqual(0, cmp(b, a))  # noqa
-
-
-class MetadataMixinTests(object):
+
+from skbio.util._testing import assert_data_frame_almost_equal
+
+
+class MetadataMixinTests:
     def test_constructor_invalid_type(self):
         for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
+            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                 self._metadata_constructor_(metadata=md)
 
     def test_constructor_no_metadata(self):
@@ -244,7 +189,6 @@ class MetadataMixinTests(object):
 
     def test_metadata_setter(self):
         obj = self._metadata_constructor_()
-
         self.assertFalse(obj.has_metadata())
 
         obj.metadata = {'hello': 'world'}
@@ -275,8 +219,7 @@ class MetadataMixinTests(object):
 
         for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
                    pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
+            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                 obj.metadata = md
             self.assertEqual(obj.metadata, {123: 456})
 
@@ -319,49 +262,54 @@ class MetadataMixinTests(object):
                         metadata={'foo': 42}).has_metadata())
 
 
-class PositionalMetadataMixinTests(object):
+class PositionalMetadataMixinTests:
     def test_constructor_invalid_positional_metadata_type(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Invalid positional metadata. Must be '
-                                   'consumable by `pd.DataFrame` constructor. '
-                                   'Original pandas error message: '):
+        with self.assertRaisesRegex(TypeError,
+                                    'Invalid positional metadata. Must be '
+                                    'consumable by `pd.DataFrame` constructor.'
+                                    ' Original pandas error message: '):
             self._positional_metadata_constructor_(0, positional_metadata=2)
 
     def test_constructor_positional_metadata_len_mismatch(self):
         # Zero elements.
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(0\).*\(4\)'):
             self._positional_metadata_constructor_(4, positional_metadata=[])
 
         # Not enough elements.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=[2, 3, 4])
 
         # Too many elements.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=[2, 3, 4, 5, 6])
 
         # Series not enough rows.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=pd.Series(range(3)))
 
         # Series too many rows.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=pd.Series(range(5)))
 
         # DataFrame not enough rows.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(3\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=pd.DataFrame({'quality': range(3)}))
 
         # DataFrame too many rows.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
+        with self.assertRaisesRegex(ValueError, '\(5\).*\(4\)'):
             self._positional_metadata_constructor_(
                 4, positional_metadata=pd.DataFrame({'quality': range(5)}))
 
+        # Empty DataFrame wrong size.
+        with self.assertRaisesRegex(ValueError, '\(2\).*\(3\)'):
+            self._positional_metadata_constructor_(
+                3, positional_metadata=pd.DataFrame(index=range(2)))
+
     def test_constructor_no_positional_metadata(self):
         # Length zero with missing/empty positional metadata.
         for empty in None, {}, pd.DataFrame():
@@ -369,16 +317,18 @@ class PositionalMetadataMixinTests(object):
                 0, positional_metadata=empty)
 
             self.assertFalse(obj.has_positional_metadata())
+            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
             assert_data_frame_almost_equal(obj.positional_metadata,
-                                           pd.DataFrame(index=np.arange(0)))
+                                           pd.DataFrame(index=range(0)))
 
         # Nonzero length with missing positional metadata.
         obj = self._positional_metadata_constructor_(
             3, positional_metadata=None)
 
         self.assertFalse(obj.has_positional_metadata())
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
         assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
+                                       pd.DataFrame(index=range(3)))
 
     def test_constructor_with_positional_metadata_len_zero(self):
         for data in [], (), np.array([]):
@@ -388,7 +338,7 @@ class PositionalMetadataMixinTests(object):
             self.assertTrue(obj.has_positional_metadata())
             assert_data_frame_almost_equal(
                 obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(0)))
+                pd.DataFrame({'foo': data}, index=range(0)))
 
     def test_constructor_with_positional_metadata_len_one(self):
         for data in [2], (2, ), np.array([2]):
@@ -398,7 +348,7 @@ class PositionalMetadataMixinTests(object):
             self.assertTrue(obj.has_positional_metadata())
             assert_data_frame_almost_equal(
                 obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(1)))
+                pd.DataFrame({'foo': data}, index=range(1)))
 
     def test_constructor_with_positional_metadata_len_greater_than_one(self):
         for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
@@ -410,7 +360,7 @@ class PositionalMetadataMixinTests(object):
             self.assertTrue(obj.has_positional_metadata())
             assert_data_frame_almost_equal(
                 obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(9)))
+                pd.DataFrame({'foo': data}, index=range(9)))
 
     def test_constructor_with_positional_metadata_multiple_columns(self):
         obj = self._positional_metadata_constructor_(
@@ -421,7 +371,7 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
+                          'bar': np.arange(5)[::-1]}, index=range(5)))
 
     def test_constructor_with_positional_metadata_custom_index(self):
         df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
@@ -433,7 +383,23 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
+                          'bar': np.arange(5)[::-1]}, index=range(5)))
+
+    def test_constructor_with_positional_metadata_int64_index(self):
+        # Test that memory-inefficient index is converted to memory-efficient
+        # index.
+        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
+                          index=np.arange(5))
+        self.assertIsInstance(df.index, pd.Int64Index)
+
+        obj = self._positional_metadata_constructor_(
+            5, positional_metadata=df)
+
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=range(5)))
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
 
     def test_constructor_handles_missing_positional_metadata_efficiently(self):
         obj = self._positional_metadata_constructor_(4)
@@ -452,7 +418,7 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
         self.assertIsNot(obj.positional_metadata, df)
 
         # Original df is not mutated.
@@ -465,21 +431,21 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
         # Change single value of underlying data.
         df.values[0][0] = 10
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
         # Mutate list (not a deep copy).
         df['bar'][0].append(42)
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
     def test_eq_basic(self):
         obj1 = self._positional_metadata_constructor_(
@@ -557,6 +523,13 @@ class PositionalMetadataMixinTests(object):
         obj2 = self._positional_metadata_constructor_(3)
         self.assertReallyNotEqual(obj1, obj2)
 
+    def test_ne_len_mismatch(self):
+        obj1 = self._positional_metadata_constructor_(
+            3, positional_metadata=pd.DataFrame(index=range(3)))
+        obj2 = self._positional_metadata_constructor_(
+            2, positional_metadata=pd.DataFrame(index=range(2)))
+        self.assertReallyNotEqual(obj1, obj2)
+
     def test_copy_positional_metadata_none(self):
         obj = self._positional_metadata_constructor_(3)
         obj_copy = copy.copy(obj)
@@ -607,6 +580,16 @@ class PositionalMetadataMixinTests(object):
             pd.DataFrame({'bar': [[1], [], [], []],
                           'baz': [42, 42, 42, 42]}))
 
+    def test_copy_preserves_range_index(self):
+        for pm in None, {'foo': ['a', 'b', 'c']}:
+            obj = self._positional_metadata_constructor_(
+                3, positional_metadata=pm)
+            obj_copy = copy.copy(obj)
+
+            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
+            self.assertIsInstance(obj_copy.positional_metadata.index,
+                                  pd.RangeIndex)
+
     def test_deepcopy_positional_metadata_none(self):
         obj = self._positional_metadata_constructor_(3)
         obj_copy = copy.deepcopy(obj)
@@ -657,6 +640,16 @@ class PositionalMetadataMixinTests(object):
             pd.DataFrame({'bar': [[], [], [], []],
                           'baz': [42, 42, 42, 42]}))
 
+    def test_deepcopy_preserves_range_index(self):
+        for pm in None, {'foo': ['a', 'b', 'c']}:
+            obj = self._positional_metadata_constructor_(
+                3, positional_metadata=pm)
+            obj_copy = copy.deepcopy(obj)
+
+            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
+            self.assertIsInstance(obj_copy.positional_metadata.index,
+                                  pd.RangeIndex)
+
     def test_deepcopy_memo_is_respected(self):
         # Basic test to ensure deepcopy's memo is passed through to recursive
         # deepcopy calls.
@@ -671,6 +664,7 @@ class PositionalMetadataMixinTests(object):
             3, positional_metadata={'foo': [22, 22, 0]})
 
         self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
         assert_data_frame_almost_equal(obj.positional_metadata,
                                        pd.DataFrame({'foo': [22, 22, 0]}))
 
@@ -691,9 +685,10 @@ class PositionalMetadataMixinTests(object):
 
         self.assertIsNone(obj._positional_metadata)
         self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
         assert_data_frame_almost_equal(
             obj.positional_metadata,
-            pd.DataFrame(index=np.arange(4)))
+            pd.DataFrame(index=range(4)))
         self.assertIsNotNone(obj._positional_metadata)
 
     def test_positional_metadata_getter_set_column_series(self):
@@ -719,9 +714,9 @@ class PositionalMetadataMixinTests(object):
         # array-like objects will fail if wrong size.
         for array_like in (np.array(range(length-1)), range(length-1),
                            np.array(range(length+1)), range(length+1)):
-            with six.assertRaisesRegex(self, ValueError,
-                                       "Length of values does not match "
-                                       "length of index"):
+            with self.assertRaisesRegex(ValueError,
+                                        "Length of values does not match "
+                                        "length of index"):
                 obj.positional_metadata['bar'] = array_like
 
     def test_positional_metadata_setter_pandas_consumable(self):
@@ -737,7 +732,7 @@ class PositionalMetadataMixinTests(object):
         obj.positional_metadata = pd.DataFrame(index=np.arange(3))
         self.assertFalse(obj.has_positional_metadata())
         assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
+                                       pd.DataFrame(index=range(3)))
 
     def test_positional_metadata_setter_data_frame(self):
         obj = self._positional_metadata_constructor_(3)
@@ -747,13 +742,14 @@ class PositionalMetadataMixinTests(object):
         obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
                                                index=['a', 'b', 'c'])
         self.assertTrue(obj.has_positional_metadata())
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
         assert_data_frame_almost_equal(obj.positional_metadata,
                                        pd.DataFrame({'foo': [3, 2, 1]}))
 
         obj.positional_metadata = pd.DataFrame(index=np.arange(3))
         self.assertFalse(obj.has_positional_metadata())
         assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
+                                       pd.DataFrame(index=range(3)))
 
     def test_positional_metadata_setter_none(self):
         obj = self._positional_metadata_constructor_(
@@ -768,7 +764,24 @@ class PositionalMetadataMixinTests(object):
 
         self.assertFalse(obj.has_positional_metadata())
         assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(0)))
+                                       pd.DataFrame(index=range(0)))
+
+    def test_positional_metadata_setter_int64_index(self):
+        # Test that memory-inefficient index is converted to memory-efficient
+        # index.
+        obj = self._positional_metadata_constructor_(5)
+
+        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
+                          index=np.arange(5))
+        self.assertIsInstance(df.index, pd.Int64Index)
+
+        obj.positional_metadata = df
+
+        assert_data_frame_almost_equal(
+            obj.positional_metadata,
+            pd.DataFrame({'foo': np.arange(5),
+                          'bar': np.arange(5)[::-1]}, index=range(5)))
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
 
     def test_positional_metadata_setter_makes_shallow_copy(self):
         obj = self._positional_metadata_constructor_(3)
@@ -780,7 +793,7 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
         self.assertIsNot(obj.positional_metadata, df)
 
         # Original df is not mutated.
@@ -793,30 +806,30 @@ class PositionalMetadataMixinTests(object):
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
         # Change single value of underlying data.
         df.values[0][0] = 10
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
         # Mutate list (not a deep copy).
         df['bar'][0].append(42)
         assert_data_frame_almost_equal(
             obj.positional_metadata,
             pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
-                         index=np.arange(3)))
+                         index=range(3)))
 
     def test_positional_metadata_setter_invalid_type(self):
         obj = self._positional_metadata_constructor_(
             3, positional_metadata={'foo': [1, 2, 42]})
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Invalid positional metadata. Must be '
-                                   'consumable by `pd.DataFrame` constructor. '
-                                   'Original pandas error message: '):
+        with self.assertRaisesRegex(TypeError,
+                                    'Invalid positional metadata. Must be '
+                                    'consumable by `pd.DataFrame` constructor.'
+                                    ' Original pandas error message: '):
             obj.positional_metadata = 2
 
         assert_data_frame_almost_equal(obj.positional_metadata,
@@ -827,13 +840,13 @@ class PositionalMetadataMixinTests(object):
             3, positional_metadata={'foo': [1, 2, 42]})
 
         # `None` behavior differs from constructor.
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
+        with self.assertRaisesRegex(ValueError, '\(0\).*\(3\)'):
             obj.positional_metadata = None
 
         assert_data_frame_almost_equal(obj.positional_metadata,
                                        pd.DataFrame({'foo': [1, 2, 42]}))
 
-        with six.assertRaisesRegex(self, ValueError, '\(4\).*\(3\)'):
+        with self.assertRaisesRegex(ValueError, '\(4\).*\(3\)'):
             obj.positional_metadata = [1, 2, 3, 4]
 
         assert_data_frame_almost_equal(obj.positional_metadata,
@@ -843,6 +856,7 @@ class PositionalMetadataMixinTests(object):
         obj = self._positional_metadata_constructor_(
             3, positional_metadata={'foo': [1, 2, 3]})
 
+        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
         assert_data_frame_almost_equal(obj.positional_metadata,
                                        pd.DataFrame({'foo': [1, 2, 3]}))
 
@@ -890,346 +904,3 @@ class PositionalMetadataMixinTests(object):
         obj = self._positional_metadata_constructor_(
             2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})
         self.assertTrue(obj.has_positional_metadata())
-
-
- at nose.tools.nottest
-class SuppressSkbioWarnings(nose.plugins.Plugin):
-    def configure(self, options, conf):
-        super(SuppressSkbioWarnings, self).configure(options, conf)
-        self.enabled = True
-
-    def beforeTest(self, test):
-        warnings.simplefilter("ignore", category=SkbioWarning)
-
-    def afterTest(self, test):
-        warnings.resetwarnings()
-
-
- at nose.tools.nottest
-class TestRunner(object):
-    """Simple wrapper class around nosetests functionality.
-
-    Parameters
-    ----------
-    filename : str
-        __file__ attribute passed in from the caller. This tells the
-        tester where to start looking for tests.
-
-    Notes
-    -----
-    The primary purpose of this class is to create an interface which users
-    of scikit-bio can use to run all of the built in tests. Normally this
-    would be done by invoking nosetests directly from the command line, but
-    scikit-bio needs several additional options which make the command long
-    and ugly. This class invokes nose with the required options.
-
-    """
-    @experimental(as_of="0.4.0")
-    def __init__(self, filename):
-        self._filename = filename
-        self._test_dir = os.path.dirname(filename)
-
-    @experimental(as_of="0.4.0")
-    def test(self, verbose=False):
-        """Performs the actual running of the tests.
-
-        Parameters
-        ----------
-        verbose : bool
-            flag for running in verbose mode.
-
-        Returns
-        -------
-        bool
-            test run success status
-        """
-        # NOTE: it doesn't seem to matter what the first element of the argv
-        # list is, there just needs to be something there.
-        argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING']
-        if PY3:
-            argv.extend(['--with-doctest', '--doctest-tests'])
-        if verbose:
-            argv.append('-v')
-        return nose.core.run(argv=argv, defaultTest=self._test_dir,
-                             addplugins=[SuppressSkbioWarnings()])
-
-
- at experimental(as_of="0.4.0")
-def get_data_path(fn, subfolder='data'):
-    """Return path to filename ``fn`` in the data folder.
-
-    During testing it is often necessary to load data files. This
-    function returns the full path to files in the ``data`` subfolder
-    by default.
-
-    Parameters
-    ----------
-    fn : str
-        File name.
-
-    subfolder : str, defaults to ``data``
-        Name of the subfolder that contains the data.
-
-
-    Returns
-    -------
-    str
-        Inferred absolute path to the test data for the module where
-        ``get_data_path(fn)`` is called.
-
-    Notes
-    -----
-    The requested path may not point to an existing file, as its
-    existence is not checked.
-
-    """
-    # getouterframes returns a list of tuples: the second tuple
-    # contains info about the caller, and the second element is its
-    # filename
-    callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
-    path = os.path.dirname(os.path.abspath(callers_filename))
-    data_path = os.path.join(path, subfolder, fn)
-    return data_path
-
-
- at experimental(as_of="0.4.0")
-def assert_ordination_results_equal(left, right, ignore_method_names=False,
-                                    ignore_axis_labels=False,
-                                    ignore_biplot_scores_labels=False,
-                                    ignore_directionality=False,
-                                    decimal=7):
-    """Assert that ordination results objects are equal.
-
-    This is a helper function intended to be used in unit tests that need to
-    compare ``OrdinationResults`` objects.
-
-    Parameters
-    ----------
-    left, right : OrdinationResults
-        Ordination results to be compared for equality.
-    ignore_method_names : bool, optional
-        Ignore differences in `short_method_name` and `long_method_name`.
-    ignore_axis_labels : bool, optional
-        Ignore differences in axis labels (i.e., column labels).
-    ignore_biplot_scores_labels : bool, optional
-        Ignore differences in `biplot_scores` row and column labels.
-    ignore_directionality : bool, optional
-        Ignore differences in directionality (i.e., differences in signs) for
-        attributes `samples`, `features` and `biplot_scores`.
-
-    Raises
-    ------
-    AssertionError
-        If the two objects are not equal.
-
-    """
-    npt.assert_equal(type(left) is type(right), True)
-
-    if not ignore_method_names:
-        npt.assert_equal(left.short_method_name, right.short_method_name)
-        npt.assert_equal(left.long_method_name, right.long_method_name)
-
-    _assert_frame_equal(left.samples, right.samples,
-                        ignore_columns=ignore_axis_labels,
-                        ignore_directionality=ignore_directionality,
-                        decimal=decimal)
-
-    _assert_frame_equal(left.features, right.features,
-                        ignore_columns=ignore_axis_labels,
-                        ignore_directionality=ignore_directionality,
-                        decimal=decimal)
-
-    _assert_frame_equal(left.biplot_scores, right.biplot_scores,
-                        ignore_biplot_scores_labels,
-                        ignore_biplot_scores_labels,
-                        ignore_directionality=ignore_directionality,
-                        decimal=decimal)
-
-    _assert_frame_equal(left.sample_constraints, right.sample_constraints,
-                        ignore_columns=ignore_axis_labels,
-                        ignore_directionality=ignore_directionality,
-                        decimal=decimal)
-
-    _assert_series_equal(left.eigvals, right.eigvals, ignore_axis_labels,
-                         decimal=decimal)
-
-    _assert_series_equal(left.proportion_explained, right.proportion_explained,
-                         ignore_axis_labels,
-                         decimal=decimal)
-
-
-def _assert_series_equal(left_s, right_s, ignore_index=False, decimal=7):
-    # assert_series_equal doesn't like None...
-    if left_s is None or right_s is None:
-        assert left_s is None and right_s is None
-    else:
-        npt.assert_almost_equal(left_s.values, right_s.values,
-                                decimal=decimal)
-        if not ignore_index:
-            pdt.assert_index_equal(left_s.index, right_s.index)
-
-
-def _assert_frame_equal(left_df, right_df, ignore_index=False,
-                        ignore_columns=False, ignore_directionality=False,
-                        decimal=7):
-    # assert_frame_equal doesn't like None...
-    if left_df is None or right_df is None:
-        assert left_df is None and right_df is None
-    else:
-        left_values = left_df.values
-        right_values = right_df.values
-
-        if ignore_directionality:
-            left_values, right_values = _normalize_signs(left_values,
-                                                         right_values)
-        npt.assert_almost_equal(left_values, right_values, decimal=decimal)
-
-        if not ignore_index:
-            pdt.assert_index_equal(left_df.index, right_df.index)
-        if not ignore_columns:
-            pdt.assert_index_equal(left_df.columns, right_df.columns)
-
-
-def _normalize_signs(arr1, arr2):
-    """Change column signs so that "column" and "-column" compare equal.
-
-    This is needed because results of eigenproblmes can have signs
-    flipped, but they're still right.
-
-    Notes
-    =====
-
-    This function tries hard to make sure that, if you find "column"
-    and "-column" almost equal, calling a function like np.allclose to
-    compare them after calling `normalize_signs` succeeds.
-
-    To do so, it distinguishes two cases for every column:
-
-    - It can be all almost equal to 0 (this includes a column of
-      zeros).
-    - Otherwise, it has a value that isn't close to 0.
-
-    In the first case, no sign needs to be flipped. I.e., for
-    |epsilon| small, np.allclose(-epsilon, 0) is true if and only if
-    np.allclose(epsilon, 0) is.
-
-    In the second case, the function finds the number in the column
-    whose absolute value is largest. Then, it compares its sign with
-    the number found in the same index, but in the other array, and
-    flips the sign of the column as needed.
-    """
-    # Let's convert everyting to floating point numbers (it's
-    # reasonable to assume that eigenvectors will already be floating
-    # point numbers). This is necessary because np.array(1) /
-    # np.array(0) != np.array(1.) / np.array(0.)
-    arr1 = np.asarray(arr1, dtype=np.float64)
-    arr2 = np.asarray(arr2, dtype=np.float64)
-
-    if arr1.shape != arr2.shape:
-        raise ValueError(
-            "Arrays must have the same shape ({0} vs {1}).".format(arr1.shape,
-                                                                   arr2.shape)
-            )
-
-    # To avoid issues around zero, we'll compare signs of the values
-    # with highest absolute value
-    max_idx = np.abs(arr1).argmax(axis=0)
-    max_arr1 = arr1[max_idx, range(arr1.shape[1])]
-    max_arr2 = arr2[max_idx, range(arr2.shape[1])]
-
-    sign_arr1 = np.sign(max_arr1)
-    sign_arr2 = np.sign(max_arr2)
-
-    # Store current warnings, and ignore division by zero (like 1. /
-    # 0.) and invalid operations (like 0. / 0.)
-    wrn = np.seterr(invalid='ignore', divide='ignore')
-    differences = sign_arr1 / sign_arr2
-    # The values in `differences` can be:
-    #    1 -> equal signs
-    #   -1 -> diff signs
-    #   Or nan (0/0), inf (nonzero/0), 0 (0/nonzero)
-    np.seterr(**wrn)
-
-    # Now let's deal with cases where `differences != \pm 1`
-    special_cases = (~np.isfinite(differences)) | (differences == 0)
-    # In any of these cases, the sign of the column doesn't matter, so
-    # let's just keep it
-    differences[special_cases] = 1
-
-    return arr1 * differences, arr2
-
-
- at experimental(as_of="0.4.0")
-def assert_data_frame_almost_equal(left, right):
-    """Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
-
-    Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
-    are considered "almost equal" if they are within a threshold defined by
-    ``assert_frame_equal``. This wrapper uses a number of
-    checks that are turned off by default in ``assert_frame_equal`` in order to
-    perform stricter comparisons (for example, ensuring the index and column
-    types are the same). It also does not consider empty ``pd.DataFrame``
-    objects equal if they have a different index.
-
-    Other notes:
-
-    * Index (row) and column ordering must be the same for objects to be equal.
-    * NaNs (``np.nan``) in the same locations are considered equal.
-
-    This is a helper function intended to be used in unit tests that need to
-    compare ``pd.DataFrame`` objects.
-
-    Parameters
-    ----------
-    left, right : pd.DataFrame
-        ``pd.DataFrame`` objects to compare.
-
-    Raises
-    ------
-    AssertionError
-        If `left` and `right` are not "almost equal".
-
-    See Also
-    --------
-    pandas.util.testing.assert_frame_equal
-
-    """
-    # pass all kwargs to ensure this function has consistent behavior even if
-    # `assert_frame_equal`'s defaults change
-    pdt.assert_frame_equal(left, right,
-                           check_dtype=True,
-                           check_index_type=True,
-                           check_column_type=True,
-                           check_frame_type=True,
-                           check_less_precise=False,
-                           check_names=True,
-                           by_blocks=False,
-                           check_exact=False)
-    # this check ensures that empty DataFrames with different indices do not
-    # compare equal. exact=True specifies that the type of the indices must be
-    # exactly the same
-    assert_index_equal(left.index, right.index)
-
-
-def assert_series_almost_equal(left, right):
-    # pass all kwargs to ensure this function has consistent behavior even if
-    # `assert_series_equal`'s defaults change
-    pdt.assert_series_equal(left, right,
-                            check_dtype=True,
-                            check_index_type=True,
-                            check_series_type=True,
-                            check_less_precise=False,
-                            check_names=True,
-                            check_exact=False,
-                            check_datetimelike_compat=False,
-                            obj='Series')
-    # this check ensures that empty Series with different indices do not
-    # compare equal.
-    assert_index_equal(left.index, right.index)
-
-
-def assert_index_equal(a, b):
-    pdt.assert_index_equal(a, b,
-                           exact=True,
-                           check_names=True,
-                           check_exact=True)
diff --git a/skbio/diversity/alpha/tests/__init__.py b/skbio/metadata/tests/__init__.py
similarity index 84%
copy from skbio/diversity/alpha/tests/__init__.py
copy to skbio/metadata/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/diversity/alpha/tests/__init__.py
+++ b/skbio/metadata/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/metadata/tests/test_mixin.py b/skbio/metadata/tests/test_mixin.py
new file mode 100644
index 0000000..a76a6ec
--- /dev/null
+++ b/skbio/metadata/tests/test_mixin.py
@@ -0,0 +1,80 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2013--, scikit-bio development team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# ----------------------------------------------------------------------------
+
+import unittest
+
+from skbio.metadata._mixin import MetadataMixin, PositionalMetadataMixin
+from skbio.util._decorator import overrides
+from skbio.util._testing import ReallyEqualMixin
+from skbio.metadata._testing import (MetadataMixinTests,
+                                     PositionalMetadataMixinTests)
+
+
+class TestMetadataMixin(unittest.TestCase, ReallyEqualMixin,
+                        MetadataMixinTests):
+    def setUp(self):
+        class ExampleMetadataMixin(MetadataMixin):
+            def __init__(self, metadata=None):
+                MetadataMixin._init_(self, metadata=metadata)
+
+            def __eq__(self, other):
+                return MetadataMixin._eq_(self, other)
+
+            def __ne__(self, other):
+                return MetadataMixin._ne_(self, other)
+
+            def __copy__(self):
+                copy = self.__class__(metadata=None)
+                copy._metadata = MetadataMixin._copy_(self)
+                return copy
+
+            def __deepcopy__(self, memo):
+                copy = self.__class__(metadata=None)
+                copy._metadata = MetadataMixin._deepcopy_(self, memo)
+                return copy
+
+        self._metadata_constructor_ = ExampleMetadataMixin
+
+
+class TestPositionalMetadataMixin(unittest.TestCase, ReallyEqualMixin,
+                                  PositionalMetadataMixinTests):
+    def setUp(self):
+        class ExamplePositionalMetadataMixin(PositionalMetadataMixin):
+            @overrides(PositionalMetadataMixin)
+            def _positional_metadata_axis_len_(self):
+                return self._axis_len
+
+            def __init__(self, axis_len, positional_metadata=None):
+                self._axis_len = axis_len
+
+                PositionalMetadataMixin._init_(
+                    self, positional_metadata=positional_metadata)
+
+            def __eq__(self, other):
+                return PositionalMetadataMixin._eq_(self, other)
+
+            def __ne__(self, other):
+                return PositionalMetadataMixin._ne_(self, other)
+
+            def __copy__(self):
+                copy = self.__class__(self._axis_len, positional_metadata=None)
+                copy._positional_metadata = \
+                    PositionalMetadataMixin._copy_(self)
+                return copy
+
+            def __deepcopy__(self, memo):
+                copy = self.__class__(self._axis_len, positional_metadata=None)
+                copy._positional_metadata = \
+                    PositionalMetadataMixin._deepcopy_(self, memo)
+                return copy
+
+        self._positional_metadata_constructor_ = ExamplePositionalMetadataMixin
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/skbio/sequence/__init__.py b/skbio/sequence/__init__.py
index 85296fa..0ee84aa 100644
--- a/skbio/sequence/__init__.py
+++ b/skbio/sequence/__init__.py
@@ -63,7 +63,7 @@ Stats:
     length: 8
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 62.50%
 -----------------------------
 0 ACCGGGTA
@@ -75,30 +75,30 @@ reverse complement or degapped (i.e., unaligned) version.
 >>> d2 = d1.degap()
 >>> d2
 DNA
------------------------------
+--------------------------
 Metadata:
     'id': 'my-sequence'
 Stats:
     length: 8
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 62.50%
------------------------------
+--------------------------
 0 ACCGGGTA
 >>> d3 = d2.reverse_complement()
 >>> d3
 DNA
------------------------------
+--------------------------
 Metadata:
     'id': 'my-sequence'
 Stats:
     length: 8
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 62.50%
------------------------------
+--------------------------
 0 TACCCGGT
 
 It's also straightforward to compute distances between sequences (optionally
@@ -137,36 +137,36 @@ Those slices can be used to extract the relevant subsequences.
 ...     r5[motif]
 ...     print('')
 RNA
------------------------------
+--------------------------
 Stats:
     length: 3
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 66.67%
------------------------------
+--------------------------
 0 AGG
 <BLANKLINE>
 RNA
------------------------------
+--------------------------
 Stats:
     length: 3
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 66.67%
------------------------------
+--------------------------
 0 GGA
 <BLANKLINE>
 RNA
------------------------------
+--------------------------
 Stats:
     length: 3
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 33.33%
------------------------------
+--------------------------
 0 GAA
 <BLANKLINE>
 
@@ -177,25 +177,25 @@ otherwise meaningful motifs.
 ...     r5[motif]
 ...     print('')
 RNA
------------------------------
+--------------------------
 Stats:
     length: 7
     has gaps: True
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 66.67%
------------------------------
+--------------------------
 0 AGG-GGA
 <BLANKLINE>
 RNA
------------------------------
+--------------------------
 Stats:
     length: 3
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 33.33%
------------------------------
+--------------------------
 0 GAA
 <BLANKLINE>
 
@@ -207,25 +207,25 @@ the input.
 ...     r5[motif].degap()
 ...     print('')
 RNA
------------------------------
+--------------------------
 Stats:
     length: 6
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 66.67%
------------------------------
+--------------------------
 0 AGGGGA
 <BLANKLINE>
 RNA
------------------------------
+--------------------------
 Stats:
     length: 3
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 33.33%
------------------------------
+--------------------------
 0 GAA
 <BLANKLINE>
 
@@ -242,14 +242,14 @@ DNA can be transcribed to RNA:
 >>> rna = dna.transcribe()
 >>> rna
 RNA
------------------------------
+--------------------------
 Stats:
     length: 12
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     GC-content: 25.00%
------------------------------
+--------------------------
 0 AUGUGUAUUU GA
 
 Both DNA and RNA can be translated into a protein sequence. For example, let's
@@ -259,26 +259,26 @@ ID 1, the default genetic code in scikit-bio):
 >>> protein_from_dna = dna.translate()
 >>> protein_from_dna
 Protein
------------------------------
+--------------------------
 Stats:
     length: 4
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     has stops: True
------------------------------
+--------------------------
 0 MCI*
 >>> protein_from_rna = rna.translate()
 >>> protein_from_rna
 Protein
------------------------------
+--------------------------
 Stats:
     length: 4
     has gaps: False
     has degenerates: False
-    has non-degenerates: True
+    has definites: True
     has stops: True
------------------------------
+--------------------------
 0 MCI*
 
 The two translations are equivalent:
@@ -304,8 +304,6 @@ Class-level methods contain information about the molecule types.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._sequence import Sequence
diff --git a/skbio/sequence/_dna.py b/skbio/sequence/_dna.py
index 686d5d1..6f4c5a6 100644
--- a/skbio/sequence/_dna.py
+++ b/skbio/sequence/_dna.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import add_metaclass
-
 import skbio
 from skbio.util._decorator import classproperty, overrides
 from skbio.util._decorator import stable
@@ -16,8 +13,8 @@ from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
 from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
 
 
-@add_metaclass(DisableSubclassingMeta)
-class DNA(GrammaredSequence, NucleotideMixin):
+class DNA(GrammaredSequence, NucleotideMixin,
+          metaclass=DisableSubclassingMeta):
     """Store DNA sequence data and optional associated metadata.
 
     Only characters in the IUPAC DNA character set [1]_ are supported.
@@ -58,7 +55,7 @@ class DNA(GrammaredSequence, NucleotideMixin):
     alphabet
     gap_chars
     default_gap_char
-    nondegenerate_chars
+    definite_chars
     degenerate_chars
     degenerate_map
     complement_map
@@ -87,28 +84,28 @@ class DNA(GrammaredSequence, NucleotideMixin):
     >>> from skbio import DNA
     >>> DNA('ACCGAAT')
     DNA
-    -----------------------------
+    --------------------------
     Stats:
         length: 7
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         GC-content: 42.86%
-    -----------------------------
+    --------------------------
     0 ACCGAAT
 
     Convert lowercase characters to uppercase:
 
     >>> DNA('AcCGaaT', lowercase=True)
     DNA
-    -----------------------------
+    --------------------------
     Stats:
         length: 7
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         GC-content: 42.86%
-    -----------------------------
+    --------------------------
     0 ACCGAAT
 
     """
@@ -127,7 +124,7 @@ class DNA(GrammaredSequence, NucleotideMixin):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set("ACGT")
 
     @classproperty
@@ -183,25 +180,25 @@ class DNA(GrammaredSequence, NucleotideMixin):
         >>> dna = DNA('TAACGTTA')
         >>> dna
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 25.00%
-        -----------------------------
+        --------------------------
         0 TAACGTTA
         >>> dna.transcribe()
         RNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 25.00%
-        -----------------------------
+        --------------------------
         0 UAACGUUA
 
         """
@@ -260,14 +257,14 @@ class DNA(GrammaredSequence, NucleotideMixin):
         >>> dna = DNA('ATGCCACTTTAA')
         >>> dna.translate()
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*
 
         Translate the same DNA sequence using a different NCBI genetic code
@@ -276,14 +273,14 @@ class DNA(GrammaredSequence, NucleotideMixin):
 
         >>> dna.translate(3, stop='require')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 MPT
 
         """
@@ -347,69 +344,69 @@ class DNA(GrammaredSequence, NucleotideMixin):
         ...     protein
         ...     print('')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 CHF
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 ATL
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 LKWH
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 *SG
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 KVA
         <BLANKLINE>
 
diff --git a/skbio/sequence/_genetic_code.py b/skbio/sequence/_genetic_code.py
index 5856be3..466f125 100644
--- a/skbio/sequence/_genetic_code.py
+++ b/skbio/sequence/_genetic_code.py
@@ -6,10 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
-from future.builtins import range
 
 from skbio.util._decorator import classproperty, stable, classonlymethod
 from skbio._base import SkbioObject
@@ -106,14 +103,14 @@ class GeneticCode(SkbioObject):
     >>> rna = RNA('AUGCCACUUUAA')
     >>> GeneticCode.from_ncbi().translate(rna)
     Protein
-    -----------------------------
+    --------------------------
     Stats:
         length: 4
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         has stops: True
-    -----------------------------
+    --------------------------
     0 MPL*
 
     """
@@ -129,7 +126,7 @@ class GeneticCode(SkbioObject):
             # indices corresponding to U, C, A, and G. 255 was chosen to
             # represent invalid character offsets because it will create an
             # invalid (out of bounds) index into `amino_acids` which should
-            # error noisily. this is important in case the valid nondegenerate
+            # error noisily. this is important in case the valid definite
             # IUPAC RNA characters change in the future and the assumptions
             # currently made by the code become invalid
             table = np.empty(ord(b'U') + 1, dtype=np.uint8)
@@ -469,14 +466,14 @@ class GeneticCode(SkbioObject):
         >>> sgc = GeneticCode.from_ncbi()
         >>> sgc.translate(rna)
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 7
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 SILPL*E
 
         In this command, we used the default ``start`` behavior, which starts
@@ -487,14 +484,14 @@ class GeneticCode(SkbioObject):
 
         >>> sgc.translate(rna, start='require')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 5
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*E
 
         Note that the codon coding for L (CUG) is an alternative start codon in
@@ -508,14 +505,14 @@ class GeneticCode(SkbioObject):
 
         >>> sgc.translate(rna, start='require', stop='require')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 MPL
 
         Passing "require" to both ``start`` and ``stop`` trims the translation
@@ -614,7 +611,7 @@ class GeneticCode(SkbioObject):
             raise NotImplementedError("scikit-bio does not currently support "
                                       "translation of degenerate sequences."
                                       "`RNA.expand_degenerates` can be used "
-                                      "to obtain all non-degenerate versions "
+                                      "to obtain all definite versions "
                                       "of a degenerate sequence.")
 
     def _raise_require_error(self, name, reading_frame):
@@ -683,69 +680,69 @@ class GeneticCode(SkbioObject):
         ...     protein
         ...     print('')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 CHF
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 ATL
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 LKWH
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 *SG
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 KVA
         <BLANKLINE>
 
diff --git a/skbio/sequence/_grammared_sequence.py b/skbio/sequence/_grammared_sequence.py
index 8a658f1..b9c8396 100644
--- a/skbio/sequence/_grammared_sequence.py
+++ b/skbio/sequence/_grammared_sequence.py
@@ -6,18 +6,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from abc import ABCMeta, abstractproperty
 from itertools import product
 import re
 
 import numpy as np
-from six import add_metaclass
-
 
 from skbio.util._decorator import (classproperty, overrides, stable,
-                                   experimental)
+                                   deprecated, experimental)
 from skbio.util._misc import MiniRegistry
 from ._sequence import Sequence
 
@@ -30,8 +26,8 @@ class GrammaredSequenceMeta(ABCMeta, type):
             type(cls.gap_chars) is not abstractproperty
         concrete_degenerate_map = \
             type(cls.degenerate_map) is not abstractproperty
-        concrete_nondegenerate_chars = \
-            type(cls.nondegenerate_chars) is not abstractproperty
+        concrete_definite_chars = \
+            type(cls.definite_chars) is not abstractproperty
         concrete_default_gap_char = \
             type(cls.default_gap_char) is not abstractproperty
         # degenerate_chars is not abstract but it depends on degenerate_map
@@ -44,7 +40,7 @@ class GrammaredSequenceMeta(ABCMeta, type):
         # probably check all the attributes on the class and make sure none of
         # them are abstract.
         if (concrete_gap_chars and concrete_degenerate_map and
-                concrete_nondegenerate_chars and concrete_default_gap_char and
+                concrete_definite_chars and concrete_default_gap_char and
                 concrete_degenerate_chars):
 
             if cls.default_gap_char not in cls.gap_chars:
@@ -58,21 +54,21 @@ class GrammaredSequenceMeta(ABCMeta, type):
                     "characters for class %s" % name)
 
             for key in cls.degenerate_map.keys():
-                for nondegenerate in cls.degenerate_map[key]:
-                    if nondegenerate not in cls.nondegenerate_chars:
+                for definite_char in cls.degenerate_map[key]:
+                    if definite_char not in cls.definite_chars:
                         raise TypeError(
                             "degenerate_map must expand only to "
-                            "characters included in nondegenerate_chars "
+                            "characters included in definite_chars "
                             "for class %s" % name)
 
-            if len(cls.degenerate_chars & cls.nondegenerate_chars) > 0:
+            if len(cls.degenerate_chars & cls.definite_chars) > 0:
                 raise TypeError(
-                    "degenerate_chars and nondegenerate_chars must not "
+                    "degenerate_chars and definite_chars must not "
                     "share any characters for class %s" % name)
 
-            if len(cls.gap_chars & cls.nondegenerate_chars) > 0:
+            if len(cls.gap_chars & cls.definite_chars) > 0:
                 raise TypeError(
-                    "gap_chars and nondegenerate_chars must not share any "
+                    "gap_chars and definite_chars must not share any "
                     "characters for class %s" % name)
 
         return cls
@@ -97,8 +93,7 @@ class DisableSubclassingMeta(GrammaredSequenceMeta):
                                                           dict(dct))
 
 
-@add_metaclass(GrammaredSequenceMeta)
-class GrammaredSequence(Sequence):
+class GrammaredSequence(Sequence, metaclass=GrammaredSequenceMeta):
     """Store sequence data conforming to a character set.
 
     This is an abstract base class (ABC) that cannot be instantiated.
@@ -114,7 +109,7 @@ class GrammaredSequence(Sequence):
     alphabet
     gap_chars
     default_gap_char
-    nondegenerate_chars
+    definite_chars
     degenerate_chars
     degenerate_map
 
@@ -150,9 +145,10 @@ class GrammaredSequence(Sequence):
     ...         return {"X": set("AB")}
     ...
     ...     @classproperty
-    ...     def nondegenerate_chars(cls):
+    ...     def definite_chars(cls):
     ...         return set("ABC")
     ...
+    ...
     ...     @classproperty
     ...     def default_gap_char(cls):
     ...         return '-'
@@ -164,31 +160,31 @@ class GrammaredSequence(Sequence):
     >>> seq = CustomSequence('ABABACAC')
     >>> seq
     CustomSequence
-    -----------------------------
+    --------------------------
     Stats:
         length: 8
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
-    -----------------------------
+        has definites: True
+    --------------------------
     0 ABABACAC
 
     >>> seq = CustomSequence('XXXXXX')
     >>> seq
     CustomSequence
-    ------------------------------
+    -------------------------
     Stats:
         length: 6
         has gaps: False
         has degenerates: True
-        has non-degenerates: False
-    ------------------------------
+        has definites: False
+    -------------------------
     0 XXXXXX
 
     """
     __validation_mask = None
     __degenerate_codes = None
-    __nondegenerate_codes = None
+    __definite_char_codes = None
     __gap_codes = None
 
     @classproperty
@@ -209,11 +205,12 @@ class GrammaredSequence(Sequence):
         return cls.__degenerate_codes
 
     @classproperty
-    def _nondegenerate_codes(cls):
-        if cls.__nondegenerate_codes is None:
-            nondegens = cls.nondegenerate_chars
-            cls.__nondegenerate_codes = np.asarray([ord(d) for d in nondegens])
-        return cls.__nondegenerate_codes
+    def _definite_char_codes(cls):
+        if cls.__definite_char_codes is None:
+            definite_chars = cls.definite_chars
+            cls.__definite_char_codes = np.asarray(
+                [ord(d) for d in definite_chars])
+        return cls.__definite_char_codes
 
     @classproperty
     def _gap_codes(cls):
@@ -227,7 +224,7 @@ class GrammaredSequence(Sequence):
     def alphabet(cls):
         """Return valid characters.
 
-        This includes gap, non-degenerate, and degenerate characters.
+        This includes gap, definite, and degenerate characters.
 
         Returns
         -------
@@ -235,7 +232,7 @@ class GrammaredSequence(Sequence):
             Valid characters.
 
         """
-        return cls.degenerate_chars | cls.nondegenerate_chars | cls.gap_chars
+        return cls.degenerate_chars | cls.definite_chars | cls.gap_chars
 
     @abstractproperty
     @classproperty
@@ -249,7 +246,7 @@ class GrammaredSequence(Sequence):
             Characters defined as gaps.
 
         """
-        pass  # pragma: no cover
+        raise NotImplementedError
 
     @abstractproperty
     @classproperty
@@ -267,7 +264,7 @@ class GrammaredSequence(Sequence):
             Default gap character.
 
         """
-        pass  # pragma: no cover
+        raise NotImplementedError
 
     @classproperty
     @stable(as_of='0.4.0')
@@ -282,9 +279,9 @@ class GrammaredSequence(Sequence):
         """
         return set(cls.degenerate_map)
 
-    @abstractproperty
     @classproperty
-    @stable(as_of='0.4.0')
+    @deprecated(as_of='0.5.0', until='0.5.2',
+                reason='Renamed to definite_chars')
     def nondegenerate_chars(cls):
         """Return non-degenerate characters.
 
@@ -294,22 +291,36 @@ class GrammaredSequence(Sequence):
             Non-degenerate characters.
 
         """
-        pass  # pragma: no cover
+        return cls.definite_chars
+
+    @abstractproperty
+    @classproperty
+    @stable(as_of='0.5.0')
+    def definite_chars(cls):
+        """Return definite characters.
+
+        Returns
+        -------
+        set
+            Definite characters.
+
+        """
+        raise NotImplementedError
 
     @abstractproperty
     @classproperty
     @stable(as_of='0.4.0')
     def degenerate_map(cls):
-        """Return mapping of degenerate to non-degenerate characters.
+        """Return mapping of degenerate to definite characters.
 
         Returns
         -------
         dict (set)
             Mapping of each degenerate character to the set of
-            non-degenerate characters it represents.
+            definite characters it represents.
 
         """
-        pass  # pragma: no cover
+        raise NotImplementedError
 
     @property
     def _motifs(self):
@@ -410,8 +421,8 @@ class GrammaredSequence(Sequence):
         See Also
         --------
         has_degenerates
-        nondegenerates
-        has_nondegenerates
+        definites
+        has_definites
 
         Examples
         --------
@@ -436,8 +447,8 @@ class GrammaredSequence(Sequence):
         See Also
         --------
         degenerates
-        nondegenerates
-        has_nondegenerates
+        definites
+        has_definites
 
         Examples
         --------
@@ -454,7 +465,33 @@ class GrammaredSequence(Sequence):
         # TODO: cache results
         return bool(self.degenerates().any())
 
-    @stable(as_of='0.4.0')
+    @stable(as_of='0.5.0')
+    def definites(self):
+        """Find positions containing definite characters in the sequence.
+
+        Returns
+        -------
+        1D np.ndarray (bool)
+            Boolean vector where ``True`` indicates a definite character
+            is present at that position in the biological sequence.
+
+        See Also
+        --------
+        has_definites
+        degenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('ACWGN')
+        >>> s.definites()
+        array([ True,  True, False,  True, False], dtype=bool)
+
+        """
+        return np.in1d(self._bytes, self._definite_char_codes)
+
+    @deprecated(as_of='0.5.0', until='0.5.2',
+                reason='Renamed to definites')
     def nondegenerates(self):
         """Find positions containing non-degenerate characters in the sequence.
 
@@ -466,9 +503,8 @@ class GrammaredSequence(Sequence):
 
         See Also
         --------
-        has_nondegenerates
+        has_definites
         degenerates
-        has_nondegenerates
 
         Examples
         --------
@@ -478,9 +514,40 @@ class GrammaredSequence(Sequence):
         array([ True,  True, False,  True, False], dtype=bool)
 
         """
-        return np.in1d(self._bytes, self._nondegenerate_codes)
+        return self.definites()
 
-    @stable(as_of='0.4.0')
+    @stable(as_of='0.5.0')
+    def has_definites(self):
+        """Determine if sequence contains one or more definite characters
+
+        Returns
+        -------
+        bool
+            Indicates whether there are one or more occurrences of
+            definite characters in the biological sequence.
+
+        See Also
+        --------
+        definites
+        degenerates
+        has_degenerates
+
+        Examples
+        --------
+        >>> from skbio import DNA
+        >>> s = DNA('NWNNNNNN')
+        >>> s.has_definites()
+        False
+        >>> t = DNA('ANCACWWGACGTT')
+        >>> t.has_definites()
+        True
+
+        """
+        # TODO: cache results
+        return bool(self.definites().any())
+
+    @deprecated(as_of='0.5.0', until='0.5.2',
+                reason='Renamed to has_definites')
     def has_nondegenerates(self):
         """Determine if sequence contains one or more non-degenerate characters
 
@@ -492,7 +559,7 @@ class GrammaredSequence(Sequence):
 
         See Also
         --------
-        nondegenerates
+        definites
         degenerates
         has_degenerates
 
@@ -508,7 +575,7 @@ class GrammaredSequence(Sequence):
 
         """
         # TODO: cache results
-        return bool(self.nondegenerates().any())
+        return self.has_definites()
 
     @stable(as_of='0.4.0')
     def degap(self):
@@ -544,7 +611,7 @@ class GrammaredSequence(Sequence):
             length: 9
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 55.56%
         -----------------------------
         0 GGTCCATTC
@@ -554,12 +621,12 @@ class GrammaredSequence(Sequence):
 
     @stable(as_of='0.4.0')
     def expand_degenerates(self):
-        """Yield all possible non-degenerate versions of the sequence.
+        """Yield all possible definite versions of the sequence.
 
         Yields
         ------
         GrammaredSequence
-            Non-degenerate version of the sequence.
+            Definite version of the sequence.
 
         See Also
         --------
@@ -567,11 +634,11 @@ class GrammaredSequence(Sequence):
 
         Notes
         -----
-        There is no guaranteed ordering to the non-degenerate sequences that
-        are yielded.
+        There is no guaranteed ordering to the definite sequences that are
+        yielded.
 
-        Each non-degenerate sequence will have the same type, metadata,
-        and positional metadata as the biological sequence.
+        Each definite sequence will have the same type, metadata, and
+        positional metadata as the biological sequence.
 
         Examples
         --------
@@ -582,31 +649,31 @@ class GrammaredSequence(Sequence):
         ...     s
         ...     print('')
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 33.33%
-        -----------------------------
+        --------------------------
         0 TAG
         <BLANKLINE>
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 66.67%
-        -----------------------------
+        --------------------------
         0 TGG
         <BLANKLINE>
 
         """
         degen_chars = self.degenerate_map
-        nonexpansion_chars = self.nondegenerate_chars.union(self.gap_chars)
+        nonexpansion_chars = self.definite_chars.union(self.gap_chars)
 
         expansions = []
         for char in self:
@@ -616,9 +683,19 @@ class GrammaredSequence(Sequence):
             else:
                 expansions.append(degen_chars[char])
 
-        result = product(*expansions)
-        return (self._to(sequence=''.join(nondegen_seq)) for nondegen_seq in
-                result)
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        for definite_seq in product(*expansions):
+            yield self._constructor(
+                sequence=''.join(definite_seq),
+                metadata=metadata,
+                positional_metadata=positional_metadata)
 
     @stable(as_of='0.4.1')
     def to_regex(self):
@@ -628,8 +705,8 @@ class GrammaredSequence(Sequence):
         -------
         regex
             Pre-compiled regular expression object (as from ``re.compile``)
-            that matches all non-degenerate versions of this sequence, and
-            nothing else.
+            that matches all definite versions of this sequence, and nothing
+            else.
 
         Examples
         --------
@@ -722,7 +799,7 @@ class GrammaredSequence(Sequence):
         stats = super(GrammaredSequence, self)._repr_stats()
         stats.append(('has gaps', '%r' % self.has_gaps()))
         stats.append(('has degenerates', '%r' % self.has_degenerates()))
-        stats.append(('has non-degenerates', '%r' % self.has_nondegenerates()))
+        stats.append(('has definites', '%r' % self.has_definites()))
         return stats
 
 
diff --git a/skbio/sequence/_nucleotide_mixin.py b/skbio/sequence/_nucleotide_mixin.py
index 771cf2d..cf421e8 100644
--- a/skbio/sequence/_nucleotide_mixin.py
+++ b/skbio/sequence/_nucleotide_mixin.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import with_metaclass
-
 from abc import ABCMeta, abstractproperty
 
 import numpy as np
@@ -17,7 +14,7 @@ from skbio.util._decorator import classproperty, stable
 from ._grammared_sequence import _motifs as parent_motifs
 
 
-class NucleotideMixin(with_metaclass(ABCMeta, object)):
+class NucleotideMixin(metaclass=ABCMeta):
     """Mixin for adding funtionality for working with sequences of nucleotides.
 
     This is an abstract base class (ABC) that cannot be instantiated.
@@ -74,7 +71,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
         the complement of ``A`` is ambiguous. Thanks, nature...
 
         """
-        return set()  # pragma: no cover
+        raise NotImplementedError
 
     @stable(as_of='0.4.0')
     def complement(self, reverse=False):
@@ -112,7 +109,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             length: 6
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 16.67%
         -----------------------------
         0 TTCATT
@@ -125,7 +122,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             length: 6
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 16.67%
         -----------------------------
         0 AAGTAA
@@ -139,7 +136,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             length: 6
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 16.67%
         -----------------------------
         0 AATGAA
@@ -148,7 +145,20 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
 
         """
         result = self._complement_lookup[self._bytes]
-        complement = self._to(sequence=result)
+
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        complement = self._constructor(
+            sequence=result,
+            metadata=metadata,
+            positional_metadata=positional_metadata)
+
         if reverse:
             complement = complement[::-1]
         return complement
@@ -188,7 +198,7 @@ class NucleotideMixin(with_metaclass(ABCMeta, object)):
             length: 6
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 16.67%
         -----------------------------
         0 AATGAA
diff --git a/skbio/sequence/_protein.py b/skbio/sequence/_protein.py
index 4639a60..afb601b 100644
--- a/skbio/sequence/_protein.py
+++ b/skbio/sequence/_protein.py
@@ -6,10 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
-from six import add_metaclass
 
 from skbio.util._decorator import classproperty, overrides
 from skbio.util._decorator import stable
@@ -17,8 +14,7 @@ from ._grammared_sequence import (GrammaredSequence, DisableSubclassingMeta,
                                   _motifs as parent_motifs)
 
 
- at add_metaclass(DisableSubclassingMeta)
-class Protein(GrammaredSequence):
+class Protein(GrammaredSequence, metaclass=DisableSubclassingMeta):
     """Store protein sequence data and optional associated metadata.
 
     Only characters in the IUPAC protein character set [1]_ are supported.
@@ -60,7 +56,7 @@ class Protein(GrammaredSequence):
     gap_chars
     default_gap_char
     stop_chars
-    nondegenerate_chars
+    definite_chars
     degenerate_chars
     degenerate_map
 
@@ -87,28 +83,28 @@ class Protein(GrammaredSequence):
     >>> from skbio import Protein
     >>> Protein('PAW')
     Protein
-    -----------------------------
+    --------------------------
     Stats:
         length: 3
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         has stops: False
-    -----------------------------
+    --------------------------
     0 PAW
 
     Convert lowercase characters to uppercase:
 
     >>> Protein('paW', lowercase=True)
     Protein
-    -----------------------------
+    --------------------------
     Stats:
         length: 3
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         has stops: False
-    -----------------------------
+    --------------------------
     0 PAW
 
     """
@@ -128,7 +124,7 @@ class Protein(GrammaredSequence):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set("ACDEFGHIKLMNPQRSTVWY")
 
     @classproperty
diff --git a/skbio/sequence/_repr.py b/skbio/sequence/_repr.py
index 2a1249d..b3e5d80 100644
--- a/skbio/sequence/_repr.py
+++ b/skbio/sequence/_repr.py
@@ -6,12 +6,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import math
 
 from skbio.util._misc import chunk_str
-from skbio.util._metadata_repr import _MetadataReprBuilder
+from skbio.metadata._repr import _MetadataReprBuilder
 
 
 class _SequenceReprBuilder(_MetadataReprBuilder):
diff --git a/skbio/sequence/_rna.py b/skbio/sequence/_rna.py
index 3548a93..a5dba07 100644
--- a/skbio/sequence/_rna.py
+++ b/skbio/sequence/_rna.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import add_metaclass
-
 import skbio
 from skbio.util._decorator import classproperty, overrides
 from skbio.util._decorator import stable
@@ -16,8 +13,8 @@ from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
 from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
 
 
- at add_metaclass(DisableSubclassingMeta)
-class RNA(GrammaredSequence, NucleotideMixin):
+class RNA(GrammaredSequence, NucleotideMixin,
+          metaclass=DisableSubclassingMeta):
     """Store RNA sequence data and optional associated metadata.
 
     Only characters in the IUPAC RNA character set [1]_ are supported.
@@ -58,7 +55,7 @@ class RNA(GrammaredSequence, NucleotideMixin):
     alphabet
     gap_chars
     default_gap_char
-    nondegenerate_chars
+    definite_chars
     degenerate_chars
     degenerate_map
     complement_map
@@ -87,28 +84,28 @@ class RNA(GrammaredSequence, NucleotideMixin):
     >>> from skbio import RNA
     >>> RNA('ACCGAAU')
     RNA
-    -----------------------------
+    --------------------------
     Stats:
         length: 7
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         GC-content: 42.86%
-    -----------------------------
+    --------------------------
     0 ACCGAAU
 
     Convert lowercase characters to uppercase:
 
     >>> RNA('AcCGaaU', lowercase=True)
     RNA
-    -----------------------------
+    --------------------------
     Stats:
         length: 7
         has gaps: False
         has degenerates: False
-        has non-degenerates: True
+        has definites: True
         GC-content: 42.86%
-    -----------------------------
+    --------------------------
     0 ACCGAAU
 
     """
@@ -127,7 +124,7 @@ class RNA(GrammaredSequence, NucleotideMixin):
 
     @classproperty
     @overrides(GrammaredSequence)
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set("ACGU")
 
     @classproperty
@@ -184,25 +181,25 @@ class RNA(GrammaredSequence, NucleotideMixin):
         >>> rna = RNA('UAACGUUA')
         >>> rna
         RNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 25.00%
-        -----------------------------
+        --------------------------
         0 UAACGUUA
         >>> rna.reverse_transcribe()
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 25.00%
-        -----------------------------
+        --------------------------
         0 TAACGTTA
         """
         seq = self._string.replace(b'U', b'T')
@@ -259,14 +256,14 @@ class RNA(GrammaredSequence, NucleotideMixin):
         >>> rna = RNA('AUGCCACUUUAA')
         >>> rna.translate()
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*
 
         Translate the same RNA sequence using a different NCBI genetic code
@@ -275,14 +272,14 @@ class RNA(GrammaredSequence, NucleotideMixin):
 
         >>> rna.translate(3, stop='require')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 MPT
 
         """
@@ -350,69 +347,69 @@ class RNA(GrammaredSequence, NucleotideMixin):
         ...     protein
         ...     print('')
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 MPL*
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 CHF
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 ATL
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 4
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 LKWH
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: True
-        -----------------------------
+        --------------------------
         0 *SG
         <BLANKLINE>
         Protein
-        -----------------------------
+        --------------------------
         Stats:
             length: 3
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             has stops: False
-        -----------------------------
+        --------------------------
         0 KVA
         <BLANKLINE>
 
diff --git a/skbio/sequence/_sequence.py b/skbio/sequence/_sequence.py
index ed15cbc..6a2c21a 100644
--- a/skbio/sequence/_sequence.py
+++ b/skbio/sequence/_sequence.py
@@ -6,11 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import range, zip
-from future.utils import viewitems
-import six
-
 import re
 import collections
 import numbers
@@ -20,7 +15,8 @@ import numpy as np
 import pandas as pd
 
 import skbio.sequence.distance
-from skbio._base import SkbioObject, MetadataMixin, PositionalMetadataMixin
+from skbio._base import SkbioObject
+from skbio.metadata._mixin import MetadataMixin, PositionalMetadataMixin
 from skbio.sequence._repr import _SequenceReprBuilder
 from skbio.util._decorator import (stable, experimental, deprecated,
                                    classonlymethod, overrides)
@@ -445,14 +441,14 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         >>> s2 = DNA("GGAA")
         >>> DNA.concat([s1, s2])
         DNA
-        -----------------------------
+        --------------------------
         Stats:
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 50.00%
-        -----------------------------
+        --------------------------
         0 ACGTGGAA
 
         Concatenate DNA sequences into a Sequence object (type coercion):
@@ -473,7 +469,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         >>> result = DNA.concat([s1, s2], how='outer')
         >>> result
         DNA
-        -----------------------------
+        ---------------------------
         Positional metadata:
             'one': <dtype: bool>
             'two': <dtype: float64>
@@ -481,9 +477,9 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
             length: 8
             has gaps: False
             has degenerates: False
-            has non-degenerates: True
+            has definites: True
             GC-content: 50.00%
-        -----------------------------
+        ---------------------------
         0 ACGTGGAA
         >>> result.positional_metadata
              one  two
@@ -491,10 +487,10 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         1   True  NaN
         2   True  NaN
         3  False  NaN
-        4  False    1
-        5  False    2
-        6   True    3
-        7  False    4
+        4  False  1.0
+        5  False  2.0
+        6   True  3.0
+        7  False  4.0
 
         """
         if how not in {'strict', 'inner', 'outer'}:
@@ -509,13 +505,13 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
 
         if how == 'strict':
             how = 'inner'
-            cols = []
+            cols = set()
             for s in seqs:
                 if s.has_positional_metadata():
-                    cols.append(frozenset(s.positional_metadata))
+                    cols.add(frozenset(s.positional_metadata))
                 else:
-                    cols.append(frozenset())
-            if len(set(cols)) > 1:
+                    cols.add(frozenset())
+            if len(cols) > 1:
                 raise ValueError("The positional metadata of the sequences do"
                                  " not have matching columns. Consider setting"
                                  " how='inner' or how='outer'")
@@ -564,25 +560,19 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
             # Sequence casting is acceptable between direct
             # decendants/ancestors
             sequence._assert_can_cast_to(type(self))
-            # we're not simply accessing sequence.metadata in order to avoid
-            # creating "empty" metadata representations on both sequence
-            # objects if they don't have metadata. same strategy is used below
-            # for positional metadata
+
             if metadata is None and sequence.has_metadata():
                 metadata = sequence.metadata
             if (positional_metadata is None and
                     sequence.has_positional_metadata()):
                 positional_metadata = sequence.positional_metadata
-            sequence = sequence._bytes
 
+            sequence = sequence._bytes
             self._owns_bytes = False
-
             self._set_bytes(sequence)
-
         else:
-            # Python 3 will not raise a UnicodeEncodeError so we force it by
-            # encoding it as ascii
-            if isinstance(sequence, six.text_type):
+            # Encode as ascii to raise UnicodeEncodeError if necessary.
+            if isinstance(sequence, str):
                 sequence = sequence.encode("ascii")
             s = np.fromstring(sequence, dtype=np.uint8)
 
@@ -605,7 +595,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
 
         if lowercase is False:
             pass
-        elif lowercase is True or isinstance(lowercase, six.string_types):
+        elif lowercase is True or isinstance(lowercase, str):
             lowercase_mask = self._bytes > self._ascii_lowercase_boundary
             self._convert_to_uppercase(lowercase_mask)
 
@@ -846,7 +836,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
 
         """
         if (not isinstance(indexable, np.ndarray) and
-            ((not isinstance(indexable, six.string_types)) and
+            ((not isinstance(indexable, str)) and
              hasattr(indexable, '__iter__'))):
             indexable_ = indexable
             indexable = np.asarray(indexable)
@@ -869,9 +859,15 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
                                              self.positional_metadata, index))
                         positional_metadata = pd.concat(pos_md_slices)
 
-                    return self._to(sequence=seq,
-                                    positional_metadata=positional_metadata)
-        elif (isinstance(indexable, six.string_types) or
+                    metadata = None
+                    if self.has_metadata():
+                        metadata = self.metadata
+
+                    return self._constructor(
+                        sequence=seq,
+                        metadata=metadata,
+                        positional_metadata=positional_metadata)
+        elif (isinstance(indexable, str) or
                 isinstance(indexable, bool)):
             raise IndexError("Cannot index with %s type: %r" %
                              (type(indexable).__name__, indexable))
@@ -891,7 +887,14 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         seq = self._bytes[indexable]
         positional_metadata = self._slice_positional_metadata(indexable)
 
-        return self._to(sequence=seq, positional_metadata=positional_metadata)
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        return self._constructor(
+            sequence=seq,
+            metadata=metadata,
+            positional_metadata=positional_metadata)
 
     def _slice_positional_metadata(self, indexable):
         if self.has_positional_metadata():
@@ -942,8 +945,6 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         """
         return len(self) > 0
 
-    __nonzero__ = __bool__
-
     @stable(as_of="0.4.0")
     def __iter__(self):
         """Iterate over positions in this sequence.
@@ -1408,6 +1409,87 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         return self._string.count(
             self._munge_to_bytestring(subsequence, "count"), start, end)
 
+    @experimental(as_of="0.5.0")
+    def replace(self, where, character):
+        """Replace values in this sequence with a different character.
+
+        Parameters
+        ----------
+        where : 1D array_like (bool) or iterable (slices or ints) or str
+            Indicates positions in the sequence to replace with `character`.
+            Can be a boolean vector, an iterable of indices/slices, or a
+            string that is a key in `positional_metadata` pointing to a
+            boolean vector.
+        character : str or bytes
+            Character that will replace chosen items in this sequence.
+
+        Returns
+        -------
+        Sequence
+            Copy of this sequence, with chosen items replaced with chosen
+            character. All metadata is retained.
+
+        Examples
+        --------
+        Let's create and display a Sequence:
+
+        >>> from skbio import Sequence
+        >>> sequence = Sequence('GGTACCAACG')
+        >>> str(sequence)
+        'GGTACCAACG'
+
+        Let's call ``replace`` on the Sequence using a boolean vector for
+        ``where`` and assign it to a new variable:
+
+        >>> seq = sequence.replace([False, False, False, True, False, False,
+        ...                         True, True, False, False], '-')
+
+        Let's take a look at the new Sequence:
+
+        >>> str(seq)
+        'GGT-CC--CG'
+
+        Other types of input are accepted by the ``where`` parameter. Let's
+        pass in a list of indices and slices that is equivalent to the boolean
+        vector we used previously:
+
+        >>> str(seq) == str(sequence.replace([3, slice(6, 8)], '-'))
+        True
+
+        ``where`` also accepts a boolean vector contained in
+        ``Sequence.positional_metadata``:
+
+        >>> sequence.positional_metadata = {'where':
+        ...                                 [False, False, False, True, False,
+        ...                                  False, True, True, False, False]}
+
+        Let's pass in the key ``'where'`` and compare to ``seq``:
+
+        >>> str(seq) == str(sequence.replace('where', '-'))
+        True
+
+        """
+        if type(character) is not bytes:
+            character = character.encode('ascii')
+        character = ord(character)
+        index = self._munge_to_index_array(where)
+        seq_bytes = self._bytes.copy()
+        seq_bytes[index] = character
+
+        metadata = None
+        if self.has_metadata():
+            metadata = self.metadata
+
+        positional_metadata = None
+        if self.has_positional_metadata():
+            positional_metadata = self.positional_metadata
+
+        # Use __class__ instead of _constructor so that validations are
+        # performed for subclasses (the user could have introduced invalid
+        # characters).
+        return self.__class__(seq_bytes, metadata=metadata,
+                              positional_metadata=positional_metadata)
+
     @stable(as_of="0.4.0")
     def index(self, subsequence, start=None, end=None):
         """Find position where subsequence first occurs in the sequence.
@@ -1793,8 +1875,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
 
     def _chars_to_indices(self, chars):
         """Helper for Sequence.frequencies."""
-        if isinstance(chars, six.string_types) or \
-                isinstance(chars, six.binary_type):
+        if isinstance(chars, (str, bytes)):
             chars = set([chars])
         elif not isinstance(chars, set):
             raise TypeError(
@@ -1805,8 +1886,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         chars = list(chars)
         indices = []
         for char in chars:
-            if not (isinstance(char, six.string_types) or
-                    isinstance(char, six.binary_type)):
+            if not isinstance(char, (str, bytes)):
                 raise TypeError(
                     "Each element of `chars` must be string-like, not %r" %
                     type(char).__name__)
@@ -1874,15 +1954,25 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
             step = k
             count = len(self) // k
 
-        if self.has_positional_metadata():
+        if len(self) == 0 or self.has_positional_metadata():
+            # Slower path when sequence is empty or positional metadata needs
+            # to be sliced.
             for i in range(0, len(self) - k + 1, step):
                 yield self[i:i+k]
-        # Optimized path when no positional metadata
         else:
+            # Optimized path when positional metadata doesn't need slicing.
             kmers = np.lib.stride_tricks.as_strided(
                 self._bytes, shape=(k, count), strides=(1, step)).T
+
+            metadata = None
+            if self.has_metadata():
+                metadata = self.metadata
+
             for s in kmers:
-                yield self._to(sequence=s)
+                yield self._constructor(
+                    sequence=s,
+                    metadata=metadata,
+                    positional_metadata=None)
 
     @stable(as_of="0.4.0")
     def kmer_frequencies(self, k, overlap=True, relative=False):
@@ -1931,7 +2021,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
                 num_kmers = len(self) // k
 
             relative_freqs = {}
-            for kmer, count in viewitems(freqs):
+            for kmer, count in freqs.items():
                 relative_freqs[kmer] = count / num_kmers
             freqs = relative_freqs
 
@@ -1968,7 +2058,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         'TATAA'
 
         """
-        if isinstance(regex, six.string_types):
+        if isinstance(regex, str):
             regex = re.compile(regex)
 
         lookup = np.arange(len(self))
@@ -2055,53 +2145,6 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
             if len(r) >= min_length:
                 yield r
 
-    def _to(self, sequence=None, metadata=None, positional_metadata=None):
-        """Return a copy of this sequence.
-
-        Returns a copy of this sequence, optionally with updated attributes
-        specified as keyword arguments.
-
-        Arguments are the same as those passed to the ``Sequence`` constructor.
-        The returned copy will have its attributes updated based on the
-        arguments. If an attribute is missing, the copy will keep the same
-        attribute as this sequence. Valid attribute names are `'sequence'`,
-        `'metadata'`, and `'positional_metadata'`. Default behavior is to
-        return a copy of this sequence without changing any attributes.
-
-        Parameters
-        ----------
-        sequence : optional
-        metadata : optional
-        positional_metadata : optional
-
-        Returns
-        -------
-        Sequence
-            Copy of this sequence, optionally with updated attributes based on
-            arguments. Will be the same type as this sequence (`self`).
-
-        Notes
-        -----
-        By default, `metadata` and `positional_metadata` are shallow-copied and
-        the reference to `sequence` is used (without copying) for efficiency
-        since `sequence` is immutable. This differs from the behavior of
-        `Sequence.copy`, which will actually copy `sequence`.
-
-        This method is the preferred way of creating new instances from an
-        existing sequence, instead of calling ``self.__class__(...)``, as the
-        latter can be error-prone (e.g., it's easy to forget to propagate
-        attributes to the new instance).
-
-        """
-        if sequence is None:
-            sequence = self._bytes
-        if metadata is None and self.has_metadata():
-            metadata = self._metadata
-        if positional_metadata is None and self.has_positional_metadata():
-            positional_metadata = self._positional_metadata
-        return self._constructor(sequence=sequence, metadata=metadata,
-                                 positional_metadata=positional_metadata)
-
    def _constructor(self, **kwargs):
        # Factory hook: build a new instance of the concrete (sub)class.
        # Subclasses can override this to customize how derived sequences
        # are created without reimplementing every producing method.
        return self.__class__(**kwargs)
 
@@ -2109,7 +2152,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
         """Return an index array from something isomorphic to a boolean vector.
 
         """
-        if isinstance(sliceable, six.string_types):
+        if isinstance(sliceable, str):
             if sliceable in self.positional_metadata:
                 if self.positional_metadata[sliceable].dtype == np.bool:
                     sliceable = self.positional_metadata[sliceable]
@@ -2186,7 +2229,7 @@ class Sequence(MetadataMixin, PositionalMetadataMixin, collections.Sequence,
     def _munge_to_bytestring(self, other, method):
         if type(other) is bytes:
             return other
-        elif isinstance(other, six.string_types):
+        elif isinstance(other, str):
             return other.encode('ascii')
         else:
             return self._munge_to_sequence(other, method)._string
diff --git a/skbio/sequence/distance.py b/skbio/sequence/distance.py
index 568db42..2d8c056 100644
--- a/skbio/sequence/distance.py
+++ b/skbio/sequence/distance.py
@@ -17,6 +17,7 @@ Functions
    :toctree: generated/
 
    hamming
+   kmer_distance
 
 """
 
@@ -28,8 +29,6 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import scipy.spatial.distance
 
@@ -87,16 +86,7 @@ def hamming(seq1, seq2):
     0.5
 
     """
-    for seq in seq1, seq2:
-        if not isinstance(seq, skbio.Sequence):
-            raise TypeError(
-                "`seq1` and `seq2` must be Sequence instances, not %r"
-                % type(seq).__name__)
-
-    if type(seq1) is not type(seq2):
-        raise TypeError(
-            "Sequences must have matching type. Type %r does not match type %r"
-            % (type(seq1).__name__, type(seq2).__name__))
+    _check_seqs(seq1, seq2)
 
     # Hamming requires equal length sequences. We are checking this here
     # because the error you would get otherwise is cryptic.
@@ -113,3 +103,76 @@ def hamming(seq1, seq2):
         distance = scipy.spatial.distance.hamming(seq1.values, seq2.values)
 
     return float(distance)
+
+
@experimental(as_of='0.5.0')
def kmer_distance(seq1, seq2, k, overlap=True):
    """Compute the kmer distance between a pair of sequences.

    The kmer distance between two sequences is the fraction of kmers that
    are found in only one of the two sequences.

    Parameters
    ----------
    seq1, seq2 : Sequence
        Sequences to compute kmer distance between.
    k : int
        The kmer length.
    overlap : bool, optional
        Defines whether the kmers should be overlapping or not.

    Returns
    -------
    float
        kmer distance between `seq1` and `seq2`.

    Raises
    ------
    ValueError
        If `k` is less than 1.
    TypeError
        If `seq1` and `seq2` are not ``Sequence`` instances.
    TypeError
        If `seq1` and `seq2` are not the same type.

    Notes
    -----
    kmer counts are not incorporated in this distance metric.

    ``np.nan`` will be returned if there are no kmers defined for the
    sequences.

    Examples
    --------
    >>> from skbio import Sequence
    >>> seq1 = Sequence('ATCGGCGAT')
    >>> seq2 = Sequence('GCAGATGTG')
    >>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS
    0.9230769230...

    """
    _check_seqs(seq1, seq2)
    # Distinct kmers per sequence; counts are deliberately discarded.
    kmers1 = {str(kmer) for kmer in seq1.iter_kmers(k, overlap=overlap)}
    kmers2 = {str(kmer) for kmer in seq2.iter_kmers(k, overlap=overlap)}
    union = kmers1 | kmers2
    if not union:
        # Neither sequence yields any kmer (e.g. k > len) -- undefined.
        return np.nan
    n_shared = len(kmers1 & kmers2)
    return (len(union) - n_shared) / len(union)
+
+
def _check_seqs(seq1, seq2):
    """Raise ``TypeError`` unless both arguments are ``skbio.Sequence``
    instances of the exact same type."""
    # Both inputs must be skbio.Sequence objects.
    for seq in (seq1, seq2):
        if not isinstance(seq, skbio.Sequence):
            raise TypeError(
                "`seq1` and `seq2` must be Sequence instances, not %r"
                % type(seq).__name__)

    # Subclasses are not interchangeable: require an exact type match.
    type1, type2 = type(seq1), type(seq2)
    if type1 is not type2:
        raise TypeError(
            "Sequences must have matching type. Type %r does not match type %r"
            % (type1.__name__, type2.__name__))
diff --git a/skbio/sequence/tests/__init__.py b/skbio/sequence/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/sequence/tests/__init__.py
+++ b/skbio/sequence/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/sequence/tests/test_distance.py b/skbio/sequence/tests/test_distance.py
index 905c359..b24221e 100644
--- a/skbio/sequence/tests/test_distance.py
+++ b/skbio/sequence/tests/test_distance.py
@@ -6,17 +6,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import itertools
 import unittest
 
-import six
 import numpy as np
 import numpy.testing as npt
 
 from skbio import Sequence, DNA
-from skbio.sequence.distance import hamming
+from skbio.sequence.distance import hamming, kmer_distance
 
 
 class TestHamming(unittest.TestCase):
@@ -24,27 +21,25 @@ class TestHamming(unittest.TestCase):
         seq1 = Sequence('abc')
         seq2 = 'abc'
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'seq1.*seq2.*Sequence.*str'):
+        with self.assertRaisesRegex(TypeError, 'seq1.*seq2.*Sequence.*str'):
             hamming(seq1, seq2)
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'seq1.*seq2.*Sequence.*str'):
+        with self.assertRaisesRegex(TypeError, 'seq1.*seq2.*Sequence.*str'):
             hamming(seq2, seq1)
 
     def test_type_mismatch(self):
         seq1 = Sequence('ABC')
         seq2 = DNA('ACG')
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Sequence.*does not match.*DNA'):
+        with self.assertRaisesRegex(TypeError,
+                                    'Sequence.*does not match.*DNA'):
             hamming(seq1, seq2)
 
     def test_length_mismatch(self):
         seq1 = Sequence('ABC')
         seq2 = Sequence('ABCD')
 
-        with six.assertRaisesRegex(self, ValueError, 'equal length.*3 != 4'):
+        with self.assertRaisesRegex(ValueError, 'equal length.*3 != 4'):
             hamming(seq1, seq2)
 
     def test_return_type(self):
@@ -126,5 +121,114 @@ class TestHamming(unittest.TestCase):
             self.assertEqual(distance, 0.75)
 
 
class TestKmerDistance(unittest.TestCase):
    """Tests for skbio.sequence.distance.kmer_distance."""

    def test_default_kwargs(self):
        a = Sequence('AACCTAGCAATGGAT')
        b = Sequence('CAGGCAGTTCTCACC')
        self.assertAlmostEqual(kmer_distance(a, b, 3), 0.9130434782608695)

    def test_nondefault_k(self):
        # k=2
        self.assertAlmostEqual(
            kmer_distance(Sequence('GCTTATGGAGAGAGA'),
                          Sequence('CTCGAACTCCAGCCA'), 2),
            0.7333333333333333)
        # k=1
        self.assertAlmostEqual(
            kmer_distance(Sequence('EADDECAEECDEACD'),
                          Sequence('DCBCBADADABCCDA'), 1),
            0.4)

    def test_overlap_false(self):
        a = Sequence('CGTTATGTCTGTGAT')
        b = Sequence('CTGAATCGGTAGTGT')
        self.assertAlmostEqual(kmer_distance(a, b, 3, overlap=False),
                               0.8888888888888888)

    def test_entirely_different_sequences(self):
        a = Sequence('CCGTGGTCGTATAAG')
        b = Sequence('CGCCTTCCACATCAG')
        self.assertEqual(kmer_distance(a, b, 3), 1.0)

    def test_same_sequence(self):
        a = Sequence('CTGCGACAGTTGGTA')
        b = Sequence('CTGCGACAGTTGGTA')
        self.assertEqual(kmer_distance(a, b, 3), 0.0)

    def test_differing_length_seqs(self):
        a = Sequence('AGAAATCTGAGCAAGGATCA')
        b = Sequence('TTAGTGCGTAATCCG')
        self.assertAlmostEqual(kmer_distance(a, b, 3), 0.9285714285714286)

    def test_with_sequence_subclass(self):
        a = DNA('GATGGTACTGTAGGT')
        b = DNA('AGGGTGAAGGTATCA')
        self.assertAlmostEqual(kmer_distance(a, b, 3), 0.8421052631578947)

    def test_with_metadata_sanity(self):
        # Metadata must not influence the distance.
        a = Sequence('AACCTAGCAATGGAT',
                     metadata={'Name': 'Kestrel Gorlick'},
                     positional_metadata={'seq': list('ACTCAAGCTACGAAG')})
        b = Sequence('CAGGCAGTTCTCACC')
        self.assertAlmostEqual(kmer_distance(a, b, 3), 0.9130434782608695)

    def test_return_type(self):
        result = kmer_distance(Sequence('ATCG'), Sequence('ATCG'), 3)
        self.assertIsInstance(result, float)
        self.assertEqual(result, 0.0)

    def test_empty_sequences(self):
        result = kmer_distance(Sequence(''), Sequence(''), 3)
        npt.assert_equal(result, np.nan)

    def test_one_empty_sequence(self):
        a = Sequence('')
        b = Sequence('CGGGCAGCTCCTACCTGCTA')
        self.assertAlmostEqual(kmer_distance(a, b, 3), 1.0)

    def test_no_kmers_found(self):
        # k longer than both sequences -> no kmers at all.
        result = kmer_distance(Sequence('ATCG'), Sequence('ACGT'), 5)
        npt.assert_equal(result, np.nan)

    def test_k_less_than_one_error(self):
        a = Sequence('ATCG')
        b = Sequence('ACTG')
        with self.assertRaisesRegex(ValueError, 'k must be greater than 0.'):
            kmer_distance(a, b, 0)

    def test_type_mismatch_error(self):
        a = Sequence('ABC')
        b = DNA('ATC')
        with self.assertRaisesRegex(TypeError, "Type 'Sequence'.*type 'DNA'"):
            kmer_distance(a, b, 3)

    def test_non_sequence_error(self):
        a = Sequence('ATCG')
        b = 'ATCG'
        with self.assertRaisesRegex(TypeError, "not 'str'"):
            kmer_distance(a, b, 3)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/skbio/sequence/tests/test_dna.py b/skbio/sequence/tests/test_dna.py
index a076059..1141359 100644
--- a/skbio/sequence/tests/test_dna.py
+++ b/skbio/sequence/tests/test_dna.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import unittest
 
 from skbio import DNA, RNA
@@ -42,7 +39,7 @@ class TestDNA(unittest.TestCase):
         self.assertEqual(seq, DNA('ATAT'))
 
     def test_cannot_subclass(self):
-        with six.assertRaisesRegex(self, TypeError, "Subclassing disabled"):
+        with self.assertRaisesRegex(TypeError, "Subclassing disabled"):
             class CustomSequence(DNA):
                 pass
 
diff --git a/skbio/sequence/tests/test_genetic_code.py b/skbio/sequence/tests/test_genetic_code.py
index 4af0a45..d0607c8 100644
--- a/skbio/sequence/tests/test_genetic_code.py
+++ b/skbio/sequence/tests/test_genetic_code.py
@@ -6,12 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import itertools
 import unittest
 
-import six
 import numpy as np
 import numpy.testing as npt
 
@@ -35,9 +32,9 @@ class TestGeneticCode(unittest.TestCase):
                          'Candidate Division SR1 and Gracilibacteria')
 
     def test_from_ncbi_invalid_input(self):
-        with six.assertRaisesRegex(self, ValueError, 'table_id.*7'):
+        with self.assertRaisesRegex(ValueError, 'table_id.*7'):
             GeneticCode.from_ncbi(7)
-        with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+        with self.assertRaisesRegex(ValueError, 'table_id.*42'):
             GeneticCode.from_ncbi(42)
 
     def test_reading_frames(self):
@@ -75,29 +72,27 @@ class TestGeneticCode(unittest.TestCase):
 
     def test_init_invalid_input(self):
         # `amino_acids` invalid protein
-        with six.assertRaisesRegex(self, ValueError, 'Invalid character.*J'):
+        with self.assertRaisesRegex(ValueError, 'Invalid character.*J'):
             GeneticCode('J' * 64, '-' * 64)
 
         # wrong number of amino acids
-        with six.assertRaisesRegex(self, ValueError, 'amino_acids.*64.*42'):
+        with self.assertRaisesRegex(ValueError, 'amino_acids.*64.*42'):
             GeneticCode('M' * 42, '-' * 64)
 
         # `amino_acids` missing M
-        with six.assertRaisesRegex(self, ValueError,
-                                   'amino_acids.*M.*character'):
+        with self.assertRaisesRegex(ValueError, 'amino_acids.*M.*character'):
             GeneticCode('A' * 64, '-' * 64)
 
         # `starts` invalid protein
-        with six.assertRaisesRegex(self, ValueError, 'Invalid character.*J'):
+        with self.assertRaisesRegex(ValueError, 'Invalid character.*J'):
             GeneticCode('M' * 64, 'J' * 64)
 
         # wrong number of starts
-        with six.assertRaisesRegex(self, ValueError, 'starts.*64.*42'):
+        with self.assertRaisesRegex(ValueError, 'starts.*64.*42'):
             GeneticCode('M' * 64, '-' * 42)
 
         # invalid characters in `starts`
-        with six.assertRaisesRegex(self, ValueError,
-                                   'starts.*M and - characters'):
+        with self.assertRaisesRegex(ValueError, 'starts.*M and - characters'):
             GeneticCode('M' * 64, '-M' * 30 + '*AQR')
 
     def test_str(self):
@@ -253,8 +248,8 @@ class TestGeneticCode(unittest.TestCase):
                 obs = self.sgc.translate(seq, start=start)
                 self.assertEqual(obs, exp)
 
-            with six.assertRaisesRegex(self, ValueError,
-                                       'reading_frame=1.*start=\'require\''):
+            with self.assertRaisesRegex(ValueError,
+                                        'reading_frame=1.*start=\'require\''):
                 self.sgc.translate(seq, start='require')
 
     def test_translate_start_with_start_codon(self):
@@ -297,8 +292,8 @@ class TestGeneticCode(unittest.TestCase):
             obs = self.sgc.translate(seq, start=start)
             self.assertEqual(obs, exp)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=1.*start=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=1.*start=\'require\''):
             self.sgc.translate(seq, start='require')
 
         # non-start codon that translates to an AA that start codons also map
@@ -310,8 +305,8 @@ class TestGeneticCode(unittest.TestCase):
             obs = self.sgc.translate(seq, start=start)
             self.assertEqual(obs, exp)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=1.*start=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=1.*start=\'require\''):
             self.sgc.translate(seq, start='require')
 
     def test_translate_start_no_accidental_mutation(self):
@@ -335,8 +330,8 @@ class TestGeneticCode(unittest.TestCase):
                 obs = self.sgc.translate(seq, stop=stop)
                 self.assertEqual(obs, exp)
 
-            with six.assertRaisesRegex(self, ValueError,
-                                       'reading_frame=1.*stop=\'require\''):
+            with self.assertRaisesRegex(ValueError,
+                                        'reading_frame=1.*stop=\'require\''):
                 self.sgc.translate(seq, stop='require')
 
     def test_translate_stop_with_stop_codon(self):
@@ -381,8 +376,8 @@ class TestGeneticCode(unittest.TestCase):
             obs = self.sgc.translate(seq, stop=stop)
             self.assertEqual(obs, exp)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=1.*stop=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=1.*stop=\'require\''):
             self.sgc.translate(seq, stop='require')
 
     def test_translate_trim_to_cds(self):
@@ -403,11 +398,11 @@ class TestGeneticCode(unittest.TestCase):
         # alternative reading frame disrupts cds:
         #     AAUUGCCUCAUUAAUAACAAUGA
         #     NCLINNN
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=2.*start=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=2.*start=\'require\''):
             self.sgc.translate(seq, reading_frame=2, start='require')
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=2.*stop=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=2.*stop=\'require\''):
             self.sgc.translate(seq, reading_frame=2, stop='require')
 
         exp = Protein('NCLINNN')
@@ -418,30 +413,29 @@ class TestGeneticCode(unittest.TestCase):
 
     def test_translate_invalid_input(self):
         # invalid sequence type
-        with six.assertRaisesRegex(self, TypeError, 'RNA.*DNA'):
+        with self.assertRaisesRegex(TypeError, 'RNA.*DNA'):
             self.sgc.translate(DNA('ACG'))
-        with six.assertRaisesRegex(self, TypeError, 'RNA.*str'):
+        with self.assertRaisesRegex(TypeError, 'RNA.*str'):
             self.sgc.translate('ACG')
 
         # invalid reading frame
-        with six.assertRaisesRegex(self, ValueError,
-                                   '\[1, 2, 3, -1, -2, -3\].*0'):
+        with self.assertRaisesRegex(ValueError, '\[1, 2, 3, -1, -2, -3\].*0'):
             self.sgc.translate(RNA('AUG'), reading_frame=0)
 
         # invalid start
-        with six.assertRaisesRegex(self, ValueError, 'start.*foo'):
+        with self.assertRaisesRegex(ValueError, 'start.*foo'):
             self.sgc.translate(RNA('AUG'), start='foo')
 
         # invalid stop
-        with six.assertRaisesRegex(self, ValueError, 'stop.*foo'):
+        with self.assertRaisesRegex(ValueError, 'stop.*foo'):
             self.sgc.translate(RNA('AUG'), stop='foo')
 
         # gapped sequence
-        with six.assertRaisesRegex(self, ValueError, 'gapped'):
+        with self.assertRaisesRegex(ValueError, 'gapped'):
             self.sgc.translate(RNA('UU-G'))
 
         # degenerate sequence
-        with six.assertRaisesRegex(self, NotImplementedError, 'degenerate'):
+        with self.assertRaisesRegex(NotImplementedError, 'degenerate'):
             self.sgc.translate(RNA('RUG'))
 
     def test_translate_varied_genetic_codes(self):
@@ -463,8 +457,8 @@ class TestGeneticCode(unittest.TestCase):
         obs = GeneticCode.from_ncbi(22).translate(seq)
         self.assertEqual(obs, exp)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=1.*start=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=1.*start=\'require\''):
             GeneticCode.from_ncbi(22).translate(seq, start='require',
                                                 stop='require')
 
@@ -474,8 +468,8 @@ class TestGeneticCode(unittest.TestCase):
         obs = gc.translate(seq)
         self.assertEqual(obs, exp)
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   'reading_frame=1.*start=\'require\''):
+        with self.assertRaisesRegex(ValueError,
+                                    'reading_frame=1.*start=\'require\''):
             gc.translate(seq, start='require', stop='require')
 
     def test_translate_six_frames(self):
diff --git a/skbio/sequence/tests/test_grammared_sequence.py b/skbio/sequence/tests/test_grammared_sequence.py
index 7edcc47..17a32d5 100644
--- a/skbio/sequence/tests/test_grammared_sequence.py
+++ b/skbio/sequence/tests/test_grammared_sequence.py
@@ -6,16 +6,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 from unittest import TestCase, main
 
 import numpy as np
 import numpy.testing as npt
+import pandas as pd
 
 from skbio.sequence import GrammaredSequence
 from skbio.util import classproperty
+from skbio.util import assert_data_frame_almost_equal
 
 
 class ExampleGrammaredSequence(GrammaredSequence):
@@ -24,7 +23,7 @@ class ExampleGrammaredSequence(GrammaredSequence):
         return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
 
     @classproperty
-    def nondegenerate_chars(cls):
+    def definite_chars(cls):
         return set("ABC")
 
     @classproperty
@@ -48,8 +47,8 @@ class ExampleMotifsTester(ExampleGrammaredSequence):
 
 class TestGrammaredSequence(TestCase):
     def test_default_gap_must_be_in_gap_chars(self):
-        with six.assertRaisesRegex(
-                self, TypeError,
+        with self.assertRaisesRegex(
+                TypeError,
                 "default_gap_char must be in gap_chars for class "
                 "GrammaredSequenceInvalidDefaultGap"):
 
@@ -58,11 +57,11 @@ class TestGrammaredSequence(TestCase):
                 def default_gap_char(cls):
                     return '*'
 
-    def test_degenerates_must_expand_to_valid_nondegenerates(self):
-        with six.assertRaisesRegex(
-                self, TypeError,
+    def test_degenerates_must_expand_to_valid_definites(self):
+        with self.assertRaisesRegex(
+                TypeError,
                 "degenerate_map must expand only to characters included in "
-                "nondegenerate_chars for class "
+                "definite_chars for class "
                 "GrammaredSequenceInvalidDefaultGap"):
 
             class GrammaredSequenceInvalidDefaultGap(ExampleGrammaredSequence):
@@ -71,12 +70,12 @@ class TestGrammaredSequence(TestCase):
                     return {"X": set("B")}
 
                 @classproperty
-                def nondegenerate_chars(cls):
+                def definite_chars(cls):
                     return set("A")
 
     def test_gap_chars_and_degenerates_share(self):
-        with six.assertRaisesRegex(
-                self, TypeError,
+        with self.assertRaisesRegex(
+                TypeError,
                 "gap_chars and degenerate_chars must not share any characters "
                 "for class GrammaredSequenceGapInDegenerateMap"):
 
@@ -87,37 +86,37 @@ class TestGrammaredSequence(TestCase):
                     return {"X": set("AB")}
 
                 @classproperty
-                def nondegenerate_chars(cls):
+                def definite_chars(cls):
                     return set("ABC")
 
                 @classproperty
                 def gap_chars(cls):
                     return set(".-X")
 
-    def test_gap_chars_and_nondegenerates_share(self):
-        with six.assertRaisesRegex(
-            self, TypeError,
-            ("gap_chars and nondegenerate_chars must not share any characters "
-             "for class GrammaredSequenceGapInNondegenerateMap")):
+    def test_gap_chars_and_definites_share(self):
+        with self.assertRaisesRegex(
+            TypeError,
+            ("gap_chars and definite_chars must not share any characters "
+             "for class GrammaredSequenceGapInDefiniteMap")):
 
-            class GrammaredSequenceGapInNondegenerateMap(
+            class GrammaredSequenceGapInDefiniteMap(
                     ExampleGrammaredSequence):
                 @classproperty
                 def degenerate_map(cls):
                     return {"X": set("AB")}
 
                 @classproperty
-                def nondegenerate_chars(cls):
+                def definite_chars(cls):
                     return set("ABC")
 
                 @classproperty
                 def gap_chars(cls):
                     return set(".-A")
 
-    def test_degenerates_and_nondegenerates_share(self):
-        with six.assertRaisesRegex(
-            self, TypeError,
-            ("degenerate_chars and nondegenerate_chars must not share any "
+    def test_degenerates_and_definites_share(self):
+        with self.assertRaisesRegex(
+            TypeError,
+            ("degenerate_chars and definite_chars must not share any "
              "characters for class GrammaredSequenceInvalid")):
 
             class GrammaredSequenceInvalid(ExampleGrammaredSequence):
@@ -126,7 +125,7 @@ class TestGrammaredSequence(TestCase):
                     return {"X": set("AB")}
 
                 @classproperty
-                def nondegenerate_chars(cls):
+                def definite_chars(cls):
                     return set("ABCX")
 
     def test_instantiation_with_no_implementation(self):
@@ -137,15 +136,16 @@ class TestGrammaredSequence(TestCase):
             GrammaredSequenceSubclassNoImplementation()
 
         self.assertIn("abstract class", str(cm.exception))
-        self.assertIn("nondegenerate_chars", str(cm.exception))
+        self.assertIn("definite_chars", str(cm.exception))
         self.assertIn("degenerate_map", str(cm.exception))
 
     def test_init_default_parameters(self):
         seq = ExampleGrammaredSequence('.-ABCXYZ')
 
         npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
-        self.assertFalse(seq.has_metadata())
-        self.assertFalse(seq.has_positional_metadata())
+        self.assertEqual(seq.metadata, {})
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame(index=range(8)))
 
     def test_init_nondefault_parameters(self):
         seq = ExampleGrammaredSequence(
@@ -154,11 +154,9 @@ class TestGrammaredSequence(TestCase):
             positional_metadata={'quality': range(8)})
 
         npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
-        self.assertTrue(seq.has_metadata())
-        self.assertEqual(seq.metadata['id'], 'foo')
-        self.assertTrue(seq.has_positional_metadata())
-        npt.assert_equal(seq.positional_metadata['quality'], np.array(range(8),
-                         dtype='int'))
+        self.assertEqual(seq.metadata, {'id': 'foo'})
+        assert_data_frame_almost_equal(seq.positional_metadata,
+                                       pd.DataFrame({'quality': range(8)}))
 
     def test_init_valid_empty_sequence(self):
         # just make sure we can instantiate an empty sequence regardless of
@@ -187,7 +185,7 @@ class TestGrammaredSequence(TestCase):
     def test_init_validate_parameter_single_character(self):
         seq = 'w'
 
-        with six.assertRaisesRegex(self, ValueError, "character.*'w'"):
+        with self.assertRaisesRegex(ValueError, "character.*'w'"):
             ExampleGrammaredSequence(seq)
 
         # test that we can instantiate an invalid sequence. we don't guarantee
@@ -199,7 +197,7 @@ class TestGrammaredSequence(TestCase):
         # alphabet characters
         seq = 'CBCBBbawCbbwBXYZ-.x'
 
-        with six.assertRaisesRegex(self, ValueError, "\['a', 'b', 'w', 'x'\]"):
+        with self.assertRaisesRegex(ValueError, "\['a', 'b', 'w', 'x'\]"):
             ExampleGrammaredSequence(seq)
 
         ExampleGrammaredSequence(seq, validate=False)
@@ -207,8 +205,8 @@ class TestGrammaredSequence(TestCase):
     def test_init_lowercase_all_lowercase(self):
         s = 'cbcbbbazcbbzbxyz-.x'
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   "\['a', 'b', 'c', 'x', 'y', 'z'\]"):
+        with self.assertRaisesRegex(ValueError,
+                                    "\['a', 'b', 'c', 'x', 'y', 'z'\]"):
             ExampleGrammaredSequence(s)
 
         seq = ExampleGrammaredSequence(s, lowercase=True)
@@ -217,7 +215,7 @@ class TestGrammaredSequence(TestCase):
     def test_init_lowercase_mixed_case(self):
         s = 'CBCBBbazCbbzBXYZ-.x'
 
-        with six.assertRaisesRegex(self, ValueError, "\['a', 'b', 'x', 'z'\]"):
+        with self.assertRaisesRegex(ValueError, "\['a', 'b', 'x', 'z'\]"):
             ExampleGrammaredSequence(s)
 
         seq = ExampleGrammaredSequence(s, lowercase=True)
@@ -226,10 +224,10 @@ class TestGrammaredSequence(TestCase):
     def test_init_lowercase_no_validation(self):
         s = 'car'
 
-        with six.assertRaisesRegex(self, ValueError, "\['a', 'c', 'r'\]"):
+        with self.assertRaisesRegex(ValueError, "\['a', 'c', 'r'\]"):
             ExampleGrammaredSequence(s)
 
-        with six.assertRaisesRegex(self, ValueError, "character.*'R'"):
+        with self.assertRaisesRegex(ValueError, "character.*'R'"):
             ExampleGrammaredSequence(s, lowercase=True)
 
         ExampleGrammaredSequence(s, lowercase=True, validate=False)
@@ -237,7 +235,7 @@ class TestGrammaredSequence(TestCase):
     def test_init_lowercase_byte_ownership(self):
         bytes = np.array([97, 98, 97], dtype=np.uint8)
 
-        with six.assertRaisesRegex(self, ValueError, "\['a', 'b'\]"):
+        with self.assertRaisesRegex(ValueError, "\['a', 'b'\]"):
             ExampleGrammaredSequence(bytes)
 
         seq = ExampleGrammaredSequence(bytes, lowercase=True)
@@ -253,10 +251,10 @@ class TestGrammaredSequence(TestCase):
     def test_init_lowercase_invalid_keys(self):
         for invalid_key in ((), [], 2):
             invalid_type = type(invalid_key)
-            with six.assertRaisesRegex(self, TypeError,
-                                       "lowercase keyword argument expected "
-                                       "a bool or string, but got %s" %
-                                       invalid_type):
+            with self.assertRaisesRegex(TypeError,
+                                        "lowercase keyword argument expected "
+                                        "a bool or string, but got %s" %
+                                        invalid_type):
                 ExampleGrammaredSequence('ACGTacgt', lowercase=invalid_key)
 
     def test_degenerate_chars(self):
@@ -273,6 +271,8 @@ class TestGrammaredSequence(TestCase):
         with self.assertRaises(AttributeError):
             ExampleGrammaredSequence('').degenerate_chars = set("BAR")
 
+    # TODO: duplicate of test_definite_chars, remove when nondegenerate_chars,
+    # is removed
     def test_nondegenerate_chars(self):
         expected = set("ABC")
         self.assertEqual(ExampleGrammaredSequence.nondegenerate_chars,
@@ -288,6 +288,21 @@ class TestGrammaredSequence(TestCase):
         with self.assertRaises(AttributeError):
             ExampleGrammaredSequence('').nondegenerate_chars = set("BAR")
 
+    def test_definite_chars(self):
+        expected = set("ABC")
+        self.assertEqual(ExampleGrammaredSequence.definite_chars,
+                         expected)
+
+        ExampleGrammaredSequence.degenerate_chars.add("D")
+        self.assertEqual(ExampleGrammaredSequence.definite_chars,
+                         expected)
+
+        self.assertEqual(ExampleGrammaredSequence('').definite_chars,
+                         expected)
+
+        with self.assertRaises(AttributeError):
+            ExampleGrammaredSequence('').definite_chars = set("BAR")
+
     def test_gap_chars(self):
         expected = set(".-")
         self.assertIs(type(ExampleGrammaredSequence.gap_chars), set)
@@ -397,6 +412,7 @@ class TestGrammaredSequence(TestCase):
         self.assertTrue(ExampleGrammaredSequence("Z").has_degenerates())
         self.assertTrue(ExampleGrammaredSequence("ABC.XYZ-").has_degenerates())
 
+    # TODO: duplicate of test_definites; remove when nondegenerates is removed
     def test_nondegenerates(self):
         self.assertIs(type(ExampleGrammaredSequence("").nondegenerates()),
                       np.ndarray)
@@ -421,6 +437,32 @@ class TestGrammaredSequence(TestCase):
         npt.assert_equal(ExampleGrammaredSequence("YB.-AC").nondegenerates(),
                          np.array([0, 1, 0, 0, 1, 1], dtype=bool))
 
+    def test_definites(self):
+        self.assertIs(type(ExampleGrammaredSequence("").definites()),
+                      np.ndarray)
+        self.assertIs(ExampleGrammaredSequence("").definites().dtype,
+                      np.dtype('bool'))
+
+        npt.assert_equal(
+            ExampleGrammaredSequence("XYZYZ-.XY.").definites(),
+            np.zeros(10).astype(bool))
+
+        npt.assert_equal(ExampleGrammaredSequence("ABABA").definites(),
+                         np.ones(5).astype(bool))
+
+        npt.assert_equal(
+            ExampleGrammaredSequence("XA.B-AZCXA").definites(),
+            np.array([0, 1] * 5, dtype=bool))
+
+        npt.assert_equal(
+            ExampleGrammaredSequence("XXAZZB.-C").definites(),
+            np.array([0, 0, 1] * 3, dtype=bool))
+
+        npt.assert_equal(ExampleGrammaredSequence("YB.-AC").definites(),
+                         np.array([0, 1, 0, 0, 1, 1], dtype=bool))
+
+    # TODO: duplicate of test_has_definites; remove when has_nondegenerates is
+    # removed.
     def test_has_nondegenerates(self):
         self.assertIs(type(ExampleGrammaredSequence("").has_nondegenerates()),
                       bool)
@@ -435,6 +477,20 @@ class TestGrammaredSequence(TestCase):
         self.assertTrue(
             ExampleGrammaredSequence(".XYZ-ABC").has_nondegenerates())
 
+    def test_has_definites(self):
+        self.assertIs(type(ExampleGrammaredSequence("").has_definites()),
+                      bool)
+        self.assertIs(type(ExampleGrammaredSequence("A").has_definites()),
+                      bool)
+
+        self.assertFalse(ExampleGrammaredSequence("").has_definites())
+        self.assertFalse(
+            ExampleGrammaredSequence("X-.YZ").has_definites())
+
+        self.assertTrue(ExampleGrammaredSequence("C").has_definites())
+        self.assertTrue(
+            ExampleGrammaredSequence(".XYZ-ABC").has_definites())
+
     def test_degap(self):
         kw = {
             'metadata': {
@@ -564,24 +620,24 @@ class TestGrammaredSequence(TestCase):
         self.assertIn('length: 0', obs)
         self.assertIn('has gaps: False', obs)
         self.assertIn('has degenerates: False', obs)
-        self.assertIn('has non-degenerates: False', obs)
+        self.assertIn('has definites: False', obs)
         self.assertTrue(obs.endswith('-'))
 
-        # no metadata, mix of gaps, degenerates, and non-degenerates
+        # no metadata, mix of gaps, degenerates, and definites
         obs = repr(ExampleGrammaredSequence('AY-B'))
         self.assertEqual(obs.count('\n'), 8)
         self.assertTrue(obs.startswith('ExampleGrammaredSequence'))
         self.assertIn('length: 4', obs)
         self.assertIn('has gaps: True', obs)
         self.assertIn('has degenerates: True', obs)
-        self.assertIn('has non-degenerates: True', obs)
+        self.assertIn('has definites: True', obs)
         self.assertTrue(obs.endswith('0 AY-B'))
 
         # metadata and positional metadata of mixed types
         obs = repr(
             ExampleGrammaredSequence(
                 'ABCA',
-                metadata={'foo': 42, u'bar': 33.33, None: True, False: {},
+                metadata={'foo': 42, b'bar': 33.33, None: True, False: {},
                           (1, 2): 3, 'acb' * 100: "'"},
                 positional_metadata={'foo': range(4),
                                      42: ['a', 'b', [], 'c']}))
@@ -594,7 +650,7 @@ class TestGrammaredSequence(TestCase):
         self.assertIn('length: 4', obs)
         self.assertIn('has gaps: False', obs)
         self.assertIn('has degenerates: False', obs)
-        self.assertIn('has non-degenerates: True', obs)
+        self.assertIn('has definites: True', obs)
         self.assertTrue(obs.endswith('0 ABCA'))
 
         # sequence spanning > 5 lines
@@ -604,7 +660,7 @@ class TestGrammaredSequence(TestCase):
         self.assertIn('length: 301', obs)
         self.assertIn('has gaps: False', obs)
         self.assertIn('has degenerates: False', obs)
-        self.assertIn('has non-degenerates: True', obs)
+        self.assertIn('has definites: True', obs)
         self.assertIn('...', obs)
         self.assertTrue(obs.endswith('300 A'))
 
diff --git a/skbio/sequence/tests/test_nucleotide_sequences.py b/skbio/sequence/tests/test_nucleotide_sequences.py
index 367c794..f83be6f 100644
--- a/skbio/sequence/tests/test_nucleotide_sequences.py
+++ b/skbio/sequence/tests/test_nucleotide_sequences.py
@@ -6,11 +6,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import unittest
 
-import six
 import numpy as np
 
 from skbio import DNA, RNA, Protein, GeneticCode
@@ -68,6 +65,7 @@ class TestNucelotideSequence(unittest.TestCase):
         self.assertIn("abstract class", str(cm.exception))
         self.assertIn("complement_map", str(cm.exception))
 
+    # TODO: remove when nondegenerate_chars is removed
     def test_nondegenerate_chars(self):
         dna = (DNA, "ACGT")
         rna = (RNA, "ACGU")
@@ -76,6 +74,14 @@ class TestNucelotideSequence(unittest.TestCase):
             self.assertEqual(constructor('').nondegenerate_chars, exp)
             self.assertEqual(constructor.nondegenerate_chars, exp)
 
+    def test_definite_chars(self):
+        dna = (DNA, "ACGT")
+        rna = (RNA, "ACGU")
+        for constructor, definite_char in (dna, rna):
+            exp = set(definite_char)
+            self.assertEqual(constructor('').definite_chars, exp)
+            self.assertEqual(constructor.definite_chars, exp)
+
     def test_degenerate_map(self):
         dna_exp = (DNA, {
             'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
@@ -167,7 +173,7 @@ class TestNucelotideSequence(unittest.TestCase):
 
     def test_translate_invalid_id(self):
         for seq in RNA('AUG'), DNA('ATG'):
-            with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+            with self.assertRaisesRegex(ValueError, 'table_id.*42'):
                 seq.translate(42)
 
     def test_translate_six_frames_ncbi_table_id(self):
@@ -222,7 +228,7 @@ class TestNucelotideSequence(unittest.TestCase):
 
     def test_translate_six_frames_invalid_id(self):
         for seq in RNA('AUG'), DNA('ATG'):
-            with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
+            with self.assertRaisesRegex(ValueError, 'table_id.*42'):
                 seq.translate_six_frames(42)
 
     def test_repr(self):
@@ -416,7 +422,7 @@ class TestNucelotideSequence(unittest.TestCase):
                     return {"X": set("AB")}
 
                 @classproperty
-                def nondegenerate_chars(cls):
+                def definite_chars(cls):
                     return set("ABC")
 
                 @classproperty
@@ -430,9 +436,9 @@ class TestNucelotideSequence(unittest.TestCase):
             seq1 = Class('ABC')
             seq2 = DifferentSequenceClass('ABC')
 
-            with six.assertRaisesRegex(self, TypeError,
-                                       "Cannot use.*and "
-                                       "DifferentSequenceClass together"):
+            with self.assertRaisesRegex(TypeError,
+                                        "Cannot use.*and "
+                                        "DifferentSequenceClass together"):
                 seq1.is_reverse_complement(seq2)
 
     def test_motif_purine_run(self):
diff --git a/skbio/sequence/tests/test_protein.py b/skbio/sequence/tests/test_protein.py
index 7af26ef..668d5ca 100644
--- a/skbio/sequence/tests/test_protein.py
+++ b/skbio/sequence/tests/test_protein.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import unittest
 
 import numpy as np
@@ -30,11 +27,18 @@ class TestProtein(unittest.TestCase):
         with self.assertRaises(AttributeError):
             Protein('').alphabet = set("ABCD")
 
+    # TODO: duplicate of test_definite_chars, remove when nondegenerate_chars,
+    # is removed
     def test_nondegenerate_chars(self):
         exp = set("ACDEFGHIKLMNPQRSTVWY")
         self.assertEqual(Protein("").nondegenerate_chars, exp)
         self.assertEqual(Protein.nondegenerate_chars, exp)
 
+    def test_definite_chars(self):
+        exp = set("ACDEFGHIKLMNPQRSTVWY")
+        self.assertEqual(Protein("").definite_chars, exp)
+        self.assertEqual(Protein.definite_chars, exp)
+
     def test_degenerate_map(self):
         exp = {
             'B': set(['D', 'N']), 'Z': set(['E', 'Q']),
@@ -121,7 +125,7 @@ class TestProtein(unittest.TestCase):
         self.assertIn('has stops: True', obs)
 
     def test_cannot_subclass(self):
-        with six.assertRaisesRegex(self, TypeError, "Subclassing disabled"):
+        with self.assertRaisesRegex(TypeError, "Subclassing disabled"):
             class CustomSequence(Protein):
                 pass
 
diff --git a/skbio/sequence/tests/test_rna.py b/skbio/sequence/tests/test_rna.py
index ebefb55..99dd331 100644
--- a/skbio/sequence/tests/test_rna.py
+++ b/skbio/sequence/tests/test_rna.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 import unittest
 
 from skbio import DNA, RNA
@@ -42,7 +39,7 @@ class TestRNA(unittest.TestCase):
         self.assertEqual(seq, RNA('AUAU'))
 
     def test_cannot_subclass(self):
-        with six.assertRaisesRegex(self, TypeError, "Subclassing disabled"):
+        with self.assertRaisesRegex(TypeError, "Subclassing disabled"):
             class CustomSequence(RNA):
                 pass
 
diff --git a/skbio/sequence/tests/test_sequence.py b/skbio/sequence/tests/test_sequence.py
index 477e657..89356fb 100644
--- a/skbio/sequence/tests/test_sequence.py
+++ b/skbio/sequence/tests/test_sequence.py
@@ -6,10 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-from six.moves import zip_longest
-
 import copy
 import functools
 import itertools
@@ -28,8 +24,9 @@ from skbio import Sequence, DNA
 from skbio.util import assert_data_frame_almost_equal
 from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
                                       _as_slice_if_single_index)
-from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
-                                 PositionalMetadataMixinTests)
+from skbio.util._testing import ReallyEqualMixin
+from skbio.metadata._testing import (MetadataMixinTests,
+                                     PositionalMetadataMixinTests)
 
 
 class SequenceSubclass(Sequence):
@@ -70,8 +67,7 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         self.lowercase_seq = Sequence('AAAAaaaa', lowercase='key')
 
         def empty_generator():
-            return
-            yield
+            yield from ()
 
         self.getitem_empty_indices = [
             [],
@@ -112,8 +108,8 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         result1 = Sequence.concat([seq1, seq2])
         result2 = Sequence.concat([seq1, seq2], how='strict')
         self.assertEqual(result1, result2)
-        with six.assertRaisesRegex(self, ValueError,
-                                   '.*positional.*metadata.*inner.*outer.*'):
+        with self.assertRaisesRegex(ValueError,
+                                    '.*positional.*metadata.*inner.*outer.*'):
             Sequence.concat([seq1, seq2, seqbad])
 
     def test_concat_strict_simple(self):
@@ -123,7 +119,7 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         seq2 = Sequence("5678", positional_metadata={'a': [2]*4})
         result = Sequence.concat([seq1, seq2], how='strict')
         self.assertEqual(result, expected)
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_concat_strict_many(self):
         odd_key = frozenset()
@@ -138,13 +134,13 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
                 Sequence("9", positional_metadata={'a': ['o'], odd_key: [5]})
             ], how='strict')
         self.assertEqual(result, expected)
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_concat_strict_fail(self):
         seq1 = Sequence("1", positional_metadata={'a': [1]})
         seq2 = Sequence("2", positional_metadata={'b': [2]})
-        with six.assertRaisesRegex(self, ValueError,
-                                   '.*positional.*metadata.*inner.*outer.*'):
+        with self.assertRaisesRegex(ValueError,
+                                    '.*positional.*metadata.*inner.*outer.*'):
             Sequence.concat([seq1, seq2], how='strict')
 
     def test_concat_outer_simple(self):
@@ -152,7 +148,7 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         seq2 = Sequence("5678")
         result = Sequence.concat([seq1, seq2], how='outer')
         self.assertEqual(result, Sequence("12345678"))
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_concat_outer_missing(self):
         a = {}
@@ -173,14 +169,14 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
                                       np.nan, np.nan, np.nan, np.nan]
                             })
         self.assertEqual(result, expected)
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_concat_inner_simple(self):
         seq1 = Sequence("1234")
         seq2 = Sequence("5678", positional_metadata={'discarded': [1] * 4})
         result = Sequence.concat([seq1, seq2], how='inner')
         self.assertEqual(result, Sequence("12345678"))
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_concat_inner_missing(self):
         seq1 = Sequence("12", positional_metadata={'a': ['1', '2'],
@@ -192,18 +188,17 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         expected = Sequence("123456", positional_metadata={'a': ['1', '2', 3,
                                                                  4, 5, 6]})
         self.assertEqual(result, expected)
-        self.assertFalse(result.has_metadata())
+        self.assertFalse(result.metadata)
 
     def test_init_default_parameters(self):
         seq = Sequence('.ABC123xyz-')
 
         npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
         self.assertEqual('.ABC123xyz-', str(seq))
-        self.assertFalse(seq.has_metadata())
+        self.assertFalse(seq.metadata)
         self.assertEqual(seq.metadata, {})
-        self.assertFalse(seq.has_positional_metadata())
         assert_data_frame_almost_equal(seq.positional_metadata,
-                                       pd.DataFrame(index=np.arange(11)))
+                                       pd.DataFrame(index=range(11)))
 
     def test_init_nondefault_parameters(self):
         seq = Sequence('.ABC123xyz-',
@@ -213,33 +208,17 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
         self.assertEqual('.ABC123xyz-', str(seq))
 
-        self.assertTrue(seq.has_metadata())
+        self.assertTrue(seq.metadata)
         self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
 
-        self.assertTrue(seq.has_positional_metadata())
         assert_data_frame_almost_equal(
             seq.positional_metadata,
-            pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
-
-    def test_init_from_sequence_handles_missing_metadata_efficiently(self):
-        # initializing from an existing Sequence object should handle metadata
-        # attributes efficiently on both objects
-        seq = Sequence('ACGT')
-        new_seq = Sequence(seq)
-        self.assertIsNone(seq._metadata)
-        self.assertIsNone(seq._positional_metadata)
-        self.assertIsNone(new_seq._metadata)
-        self.assertIsNone(new_seq._positional_metadata)
-
-        self.assertFalse(seq.has_metadata())
-        self.assertFalse(seq.has_positional_metadata())
-        self.assertFalse(new_seq.has_metadata())
-        self.assertFalse(new_seq.has_positional_metadata())
+            pd.DataFrame({'quality': range(11)}, index=range(11)))
 
     def test_init_empty_sequence(self):
         # Test constructing an empty sequence using each supported input type.
         for s in (b'',  # bytes
-                  u'',  # unicode
+                  '',  # unicode
                   np.array('', dtype='c'),  # char vector
                   np.fromstring('', dtype=np.uint8),  # byte vec
                   Sequence('')):  # another Sequence object
@@ -252,16 +231,15 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             self.assertEqual(str(seq), '')
             self.assertEqual(len(seq), 0)
 
-            self.assertFalse(seq.has_metadata())
+            self.assertFalse(seq.metadata)
             self.assertEqual(seq.metadata, {})
 
-            self.assertFalse(seq.has_positional_metadata())
             assert_data_frame_almost_equal(seq.positional_metadata,
-                                           pd.DataFrame(index=np.arange(0)))
+                                           pd.DataFrame(index=range(0)))
 
     def test_init_single_character_sequence(self):
         for s in (b'A',
-                  u'A',
+                  'A',
                   np.array('A', dtype='c'),
                   np.fromstring('A', dtype=np.uint8),
                   Sequence('A')):
@@ -274,16 +252,15 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             self.assertEqual(str(seq), 'A')
             self.assertEqual(len(seq), 1)
 
-            self.assertFalse(seq.has_metadata())
+            self.assertFalse(seq.metadata)
             self.assertEqual(seq.metadata, {})
 
-            self.assertFalse(seq.has_positional_metadata())
             assert_data_frame_almost_equal(seq.positional_metadata,
-                                           pd.DataFrame(index=np.arange(1)))
+                                           pd.DataFrame(index=range(1)))
 
     def test_init_multiple_character_sequence(self):
         for s in (b'.ABC\t123  xyz-',
-                  u'.ABC\t123  xyz-',
+                  '.ABC\t123  xyz-',
                   np.array('.ABC\t123  xyz-', dtype='c'),
                   np.fromstring('.ABC\t123  xyz-', dtype=np.uint8),
                   Sequence('.ABC\t123  xyz-')):
@@ -297,12 +274,11 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             self.assertEqual(str(seq), '.ABC\t123  xyz-')
             self.assertEqual(len(seq), 14)
 
-            self.assertFalse(seq.has_metadata())
+            self.assertFalse(seq.metadata)
             self.assertEqual(seq.metadata, {})
 
-            self.assertFalse(seq.has_positional_metadata())
             assert_data_frame_almost_equal(seq.positional_metadata,
-                                           pd.DataFrame(index=np.arange(14)))
+                                           pd.DataFrame(index=range(14)))
 
     def test_init_from_sequence_object(self):
         # We're testing this in its simplest form in other tests. This test
@@ -415,30 +391,30 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             Sequence(np.array([1, {}, ()]))
 
         # invalid input type (non-numpy.ndarray input)
-        with six.assertRaisesRegex(self, TypeError, 'tuple'):
+        with self.assertRaisesRegex(TypeError, 'tuple'):
             Sequence(('a', 'b', 'c'))
-        with six.assertRaisesRegex(self, TypeError, 'list'):
+        with self.assertRaisesRegex(TypeError, 'list'):
             Sequence(['a', 'b', 'c'])
-        with six.assertRaisesRegex(self, TypeError, 'set'):
+        with self.assertRaisesRegex(TypeError, 'set'):
             Sequence({'a', 'b', 'c'})
-        with six.assertRaisesRegex(self, TypeError, 'dict'):
+        with self.assertRaisesRegex(TypeError, 'dict'):
             Sequence({'a': 42, 'b': 43, 'c': 44})
-        with six.assertRaisesRegex(self, TypeError, 'int'):
+        with self.assertRaisesRegex(TypeError, 'int'):
             Sequence(42)
-        with six.assertRaisesRegex(self, TypeError, 'float'):
+        with self.assertRaisesRegex(TypeError, 'float'):
             Sequence(4.2)
-        with six.assertRaisesRegex(self, TypeError, 'int64'):
+        with self.assertRaisesRegex(TypeError, 'int64'):
             Sequence(np.int_(50))
-        with six.assertRaisesRegex(self, TypeError, 'float64'):
+        with self.assertRaisesRegex(TypeError, 'float64'):
             Sequence(np.float_(50))
-        with six.assertRaisesRegex(self, TypeError, 'Foo'):
-            class Foo(object):
+        with self.assertRaisesRegex(TypeError, 'Foo'):
+            class Foo:
                 pass
             Sequence(Foo())
 
         # out of ASCII range
         with self.assertRaises(UnicodeEncodeError):
-            Sequence(u'abc\u1F30')
+            Sequence('abc\u1F30')
 
     def test_values_property(self):
         # Property tests are only concerned with testing the interface
@@ -733,6 +709,14 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         self.assertEqual(seq[0:4, :-4:-1, 9], eseq)
         self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
 
+    def test_getitem_with_tuple_of_mixed_no_metadata(self):
+        seq = Sequence("0123456789abcdef")
+        eseq = Sequence("0123fed9")
+        self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
+        self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
+        self.assertEqual(seq[0:4, :-4:-1, 9], eseq)
+        self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
+
     def test_getitem_with_iterable_of_mixed_has_positional_metadata(self):
         s = "0123456789abcdef"
         length = len(s)
@@ -875,32 +859,6 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         with self.assertRaises(Exception):
             seq[100 * [True, False, True]]
 
-    def test_getitem_handles_missing_metadata_efficiently(self):
-        # there are two paths in __getitem__ we need to test for efficient
-        # handling of missing metadata
-
-        # path 1: mixed types
-        seq = Sequence('ACGT')
-        subseq = seq[1, 2:4]
-        self.assertEqual(subseq, Sequence('CGT'))
-
-        # metadata attributes should be None and not initialized to a "missing"
-        # representation
-        self.assertIsNone(seq._metadata)
-        self.assertIsNone(seq._positional_metadata)
-        self.assertIsNone(subseq._metadata)
-        self.assertIsNone(subseq._positional_metadata)
-
-        # path 2: uniform types
-        seq = Sequence('ACGT')
-        subseq = seq[1:3]
-        self.assertEqual(subseq, Sequence('CG'))
-
-        self.assertIsNone(seq._metadata)
-        self.assertIsNone(seq._positional_metadata)
-        self.assertIsNone(subseq._metadata)
-        self.assertIsNone(subseq._positional_metadata)
-
     def test_getitem_empty_positional_metadata(self):
         seq = Sequence('ACGT')
         seq.positional_metadata  # This will create empty positional_metadata
@@ -998,8 +956,8 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         # basic sanity checks -- more extensive testing of formatting and
         # special cases is performed in SequenceReprDoctests below. here we
         # only test that pieces of the repr are present. these tests also
-        # exercise coverage for py2/3 since the doctests in
-        # SequenceReprDoctests only currently run in py3.
+        # exercise coverage in case doctests stop counting towards coverage in
+        # the future
 
         # minimal
         obs = repr(Sequence(''))
@@ -1019,7 +977,7 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         obs = repr(
             Sequence(
                 'ACGT',
-                metadata={'foo': 'bar', u'bar': 33.33, None: True, False: {},
+                metadata={'foo': 'bar', b'bar': 33.33, None: True, False: {},
                           (1, 2): 3, 'acb' * 100: "'", 10: 11},
                 positional_metadata={'foo': range(4),
                                      42: ['a', 'b', [], 'c']}))
@@ -1050,80 +1008,6 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             "ABC")
         self.assertIs(type(str(Sequence("A"))), str)
 
-    def test_to_default_behavior(self):
-        # minimal sequence, sequence with all optional attributes present, and
-        # a subclass of Sequence
-        for seq in (Sequence('ACGT'),
-                    Sequence('ACGT', metadata={'id': 'foo', 'desc': 'bar'},
-                             positional_metadata={'quality': range(4)}),
-                    SequenceSubclass('ACGU', metadata={'id': 'rna seq'})):
-            to = seq._to()
-            self.assertEqual(seq, to)
-            self.assertIsNot(seq, to)
-
-    def test_to_update_single_attribute(self):
-        seq = Sequence('HE..--..LLO',
-                       metadata={'id': 'hello', 'description': 'gapped hello'},
-                       positional_metadata={'quality': range(11)})
-
-        to = seq._to(metadata={'id': 'new id'})
-        self.assertIsNot(seq, to)
-        self.assertNotEqual(seq, to)
-        self.assertEqual(
-            to,
-            Sequence('HE..--..LLO', metadata={'id': 'new id'},
-                     positional_metadata={'quality': range(11)}))
-
-        # metadata shouldn't have changed on the original sequence
-        self.assertEqual(seq.metadata,
-                         {'id': 'hello', 'description': 'gapped hello'})
-
-    def test_to_update_multiple_attributes(self):
-        seq = Sequence('HE..--..LLO',
-                       metadata={'id': 'hello', 'description': 'gapped hello'},
-                       positional_metadata={'quality': range(11)})
-
-        to = seq._to(metadata={'id': 'new id', 'description': 'new desc'},
-                     positional_metadata={'quality': range(20, 25)},
-                     sequence='ACGTA')
-        self.assertIsNot(seq, to)
-        self.assertNotEqual(seq, to)
-
-        # attributes should be what we specified in the _to call...
-        self.assertEqual(to.metadata['id'], 'new id')
-        npt.assert_array_equal(to.positional_metadata['quality'],
-                               np.array([20, 21, 22, 23, 24]))
-        npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
-        self.assertEqual(to.metadata['description'], 'new desc')
-
-        # ...and shouldn't have changed on the original sequence
-        self.assertEqual(seq.metadata['id'], 'hello')
-        npt.assert_array_equal(seq.positional_metadata['quality'], range(11))
-        npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
-                                                    dtype='c'))
-        self.assertEqual(seq.metadata['description'], 'gapped hello')
-
-    def test_to_invalid_kwargs(self):
-        seq = Sequence('ACCGGTACC', metadata={'id': "test-seq",
-                       'desc': "A test sequence"})
-
-        with self.assertRaises(TypeError):
-            seq._to(metadata={'id': 'bar'}, unrecognized_kwarg='baz')
-
-    def test_to_no_positional_metadata(self):
-        seq = Sequence('ACGT')
-        seq.positional_metadata  # This will create empty positional metadata
-        result = seq._to(sequence='TGA')
-        self.assertIsNone(result._positional_metadata)
-        self.assertEqual(result, Sequence('TGA'))
-
-    def test_to_no_metadata(self):
-        seq = Sequence('ACGT')
-        seq.metadata  # This will create empty metadata
-        result = seq._to(sequence='TGA')
-        self.assertIsNone(result._metadata)
-        self.assertEqual(result, Sequence('TGA'))
-
     def test_count(self):
         def construct_char_array(s):
             return np.fromstring(s, dtype='|S1')
@@ -1156,6 +1040,102 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         self.assertIn("Sequence", str(cm.exception))
         self.assertIn("SequenceSubclass", str(cm.exception))
 
+    def test_replace_sanity(self):
+        seq = Sequence('AAGCATGCCCTTTACATTTG')
+        index = self._make_index('10011011001111110111')
+        obs = seq.replace(index, '_')
+        exp = Sequence('_AG__T__CC______T___')
+        self.assertEqual(obs, exp)
+
+    def test_replace_index_array(self):
+        seq = Sequence('TCGGGTGTTGTGCAACCACC')
+        for _type in list, tuple, np.array, pd.Series:
+            index = _type([0, 2, 5, 8, 9])
+            obs = seq.replace(index, '-')
+            exp = Sequence('-C-GG-GT--TGCAACCACC')
+            self.assertEqual(obs, exp)
+
+    def test_replace_iterable_slices(self):
+        seq = Sequence('CATTATGGACCCAGCGTGCC')
+        slices = (slice(0, 5), slice(8, 12), slice(15, 17))
+        mixed_slices = (0, 1, 2, 3, 4, slice(8, 12), 15, 16)
+        for _type in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
+                      lambda x: pd.Series(tuple(x))):
+            index = (_type(slices), _type(mixed_slices))
+            obs_slices = seq.replace(index[0], '-')
+            obs_mixed = seq.replace(index[1], '-')
+            exp = Sequence('-----TGG----AGC--GCC')
+            self.assertEqual(obs_slices, exp)
+            self.assertEqual(obs_mixed, exp)
+
+    def test_replace_index_in_positional_metadata(self):
+        positional_metadata = {'where': self._make_index('001110110'
+                                                         '10001110000')}
+        seq = Sequence('AAGATTGATACCACAGTTGT',
+                       positional_metadata=positional_metadata)
+        obs = seq.replace('where', '-')
+        exp = Sequence('AA---T--T-CCA---TTGT',
+                       positional_metadata=positional_metadata)
+        self.assertEqual(obs, exp)
+
+    def test_replace_does_not_mutate_original(self):
+        seq = Sequence('ATCG')
+        index = self._make_index('0011')
+        seq.replace(index, '-')
+        obs = seq
+        exp = Sequence('ATCG')
+        self.assertEqual(obs, exp)
+
+    def test_replace_with_metadata(self):
+        seq = Sequence('GCACGGCAAGAAGCGCCCCA',
+                       metadata={'NM': 'Kestrel Gorlick'},
+                       positional_metadata={'diff':
+                                            list('01100001110010001100')})
+        index = self._make_index('01100001110010001100')
+        obs = seq.replace(index, '-')
+        exp = Sequence('G--CGGC---AA-CGC--CA',
+                       metadata={'NM': 'Kestrel Gorlick'},
+                       positional_metadata={'diff':
+                                            list('01100001110010001100')})
+        self.assertEqual(obs, exp)
+
+    def test_replace_with_subclass(self):
+        seq = DNA('CGACAACCGATGTGCTGTAA')
+        index = self._make_index('10101000111111110011')
+        obs = seq.replace(index, '-')
+        exp = DNA('-G-C-ACC--------GT--')
+        self.assertEqual(obs, exp)
+
+    def test_replace_with_bytes(self):
+        seq = Sequence('ABC123')
+
+        obs = seq.replace([1, 3, 5], b'*')
+
+        self.assertEqual(obs, Sequence('A*C*2*'))
+
+    def test_replace_invalid_char_for_type_error(self):
+        seq = DNA('TAAACGGAACGCTACGTCTG')
+        index = self._make_index('01000001101011001001')
+        with self.assertRaisesRegex(ValueError, "Invalid character.*'F'"):
+            seq.replace(index, 'F')
+
+    def test_replace_invalid_char_error(self):
+        seq = Sequence('GGGAGCTAGA')
+        index = self._make_index('1000101110')
+        with self.assertRaisesRegex(UnicodeEncodeError,
+                                    "can't encode character.*not in "
+                                    "range\(128\)"):
+            seq.replace(index, '\uFFFF')
+
+    def test_replace_non_single_character_error(self):
+        seq = Sequence('CCGAACTGTC')
+        index = self._make_index('1100110011')
+        with self.assertRaisesRegex(TypeError, 'string of length 2 found'):
+            seq.replace(index, 'AB')
+
+    def _make_index(self, bools):
+        return [bool(int(char)) for char in bools]
+
     def test_lowercase_mungeable_key(self):
         # NOTE: This test relies on Sequence._munge_to_index_array working
         # properly. If the internal implementation of the lowercase method
@@ -1398,10 +1378,10 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         self.assertEqual(seq.frequencies(chars=chars, relative=True),
                          {b'z': 5/11})
 
-        chars = u'z'
-        self.assertEqual(seq.frequencies(chars=chars), {u'z': 5})
+        chars = 'z'
+        self.assertEqual(seq.frequencies(chars=chars), {'z': 5})
         self.assertEqual(seq.frequencies(chars=chars, relative=True),
-                         {u'z': 5/11})
+                         {'z': 5/11})
 
         chars = np.fromstring('z', dtype='|S1')[0]
         self.assertEqual(seq.frequencies(chars=chars), {b'z': 5})
@@ -1414,10 +1394,10 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         self.assertEqual(seq.frequencies(chars=chars, relative=True),
                          {b'x': 0.0, b'z': 5/11})
 
-        chars = {u'x', u'z'}
-        self.assertEqual(seq.frequencies(chars=chars), {u'x': 0, u'z': 5})
+        chars = {'x', 'z'}
+        self.assertEqual(seq.frequencies(chars=chars), {'x': 0, 'z': 5})
         self.assertEqual(seq.frequencies(chars=chars, relative=True),
-                         {u'x': 0.0, u'z': 5/11})
+                         {'x': 0.0, 'z': 5/11})
 
         chars = {
             np.fromstring('x', dtype='|S1')[0],
@@ -1454,32 +1434,33 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
     def test_frequencies_invalid_chars(self):
         seq = Sequence('abcabc')
 
-        with six.assertRaisesRegex(self, ValueError, '0 characters'):
+        with self.assertRaisesRegex(ValueError, '0 characters'):
             seq.frequencies(chars='')
 
-        with six.assertRaisesRegex(self, ValueError, '0 characters'):
+        with self.assertRaisesRegex(ValueError, '0 characters'):
             seq.frequencies(chars={''})
 
-        with six.assertRaisesRegex(self, ValueError, '2 characters'):
+        with self.assertRaisesRegex(ValueError, '2 characters'):
             seq.frequencies(chars='ab')
 
-        with six.assertRaisesRegex(self, ValueError, '2 characters'):
+        with self.assertRaisesRegex(ValueError, '2 characters'):
             seq.frequencies(chars={'b', 'ab'})
 
-        with six.assertRaisesRegex(self, TypeError, 'string.*NoneType'):
+        with self.assertRaisesRegex(TypeError, 'string.*NoneType'):
             seq.frequencies(chars={'a', None})
 
-        with six.assertRaisesRegex(self, ValueError, 'outside the range'):
-            seq.frequencies(chars=u'\u1F30')
+        with self.assertRaisesRegex(ValueError, 'outside the range'):
+            seq.frequencies(chars='\u1F30')
 
-        with six.assertRaisesRegex(self, ValueError, 'outside the range'):
-            seq.frequencies(chars={'c', u'\u1F30'})
+        with self.assertRaisesRegex(ValueError, 'outside the range'):
+            seq.frequencies(chars={'c', '\u1F30'})
 
-        with six.assertRaisesRegex(self, TypeError, 'set.*int'):
+        with self.assertRaisesRegex(TypeError, 'set.*int'):
             seq.frequencies(chars=42)
 
     def _compare_kmers_results(self, observed, expected):
-        for obs, exp in zip_longest(observed, expected, fillvalue=None):
+        for obs, exp in itertools.zip_longest(observed, expected,
+                                              fillvalue=None):
             self.assertEqual(obs, exp)
 
     def test_iter_kmers(self):
@@ -1707,6 +1688,16 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         ]
         self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
 
+    def test_iter_kmers_empty_sequence(self):
+        seq = Sequence('')
+        expected = []
+        self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
+
+    def test_iter_kmers_empty_sequence_with_positional_metadata(self):
+        seq = Sequence('', positional_metadata={'quality': []})
+        expected = []
+        self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
+
     def test_kmer_frequencies_empty_sequence(self):
         seq = Sequence('')
 
@@ -1925,13 +1916,6 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
             self.assertIsNot(seq_copy, seq)
             self.assertIsNot(seq_copy._bytes, seq._bytes)
 
-            # metadata attributes should be None and not initialized to a
-            # "missing" representation
-            self.assertIsNone(seq._metadata)
-            self.assertIsNone(seq._positional_metadata)
-            self.assertIsNone(seq_copy._metadata)
-            self.assertIsNone(seq_copy._positional_metadata)
-
     def test_copy_with_metadata_shallow(self):
         # copy.copy and Sequence.copy should behave identically
         for copy_method in lambda seq: seq.copy(), copy.copy:
@@ -2164,22 +2148,22 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
         seq = Sequence(seq_str,
                        positional_metadata={'quality': range(len(seq_str))})
 
-        with six.assertRaisesRegex(self, ValueError,
-                                   "No positional metadata associated with "
-                                   "key 'introns'"):
+        with self.assertRaisesRegex(ValueError,
+                                    "No positional metadata associated with "
+                                    "key 'introns'"):
             seq._munge_to_index_array('introns')
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   "Column 'quality' in positional metadata "
-                                   "does not correspond to a boolean "
-                                   "vector"):
+        with self.assertRaisesRegex(TypeError,
+                                    "Column 'quality' in positional metadata "
+                                    "does not correspond to a boolean "
+                                    "vector"):
             seq._munge_to_index_array('quality')
 
     def test_munge_to_bytestring_return_bytes(self):
         seq = Sequence('')
         m = 'dummy_method'
         str_inputs = ('', 'a', 'acgt')
-        unicode_inputs = (u'', u'a', u'acgt')
+        unicode_inputs = ('', 'a', 'acgt')
         byte_inputs = (b'', b'a', b'acgt')
         seq_inputs = (Sequence(''), Sequence('a'), Sequence('acgt'))
         all_inputs = str_inputs + unicode_inputs + byte_inputs + seq_inputs
@@ -2192,12 +2176,12 @@ class TestSequence(TestSequenceBase, ReallyEqualMixin):
 
     def test_munge_to_bytestring_unicode_out_of_ascii_range(self):
         seq = Sequence('')
-        all_inputs = (u'\x80', u'abc\x80', u'\x80abc')
+        all_inputs = ('\x80', 'abc\x80', '\x80abc')
         for input_ in all_inputs:
-            with six.assertRaisesRegex(self, UnicodeEncodeError,
-                                       "'ascii' codec can't encode character"
-                                       ".*in position.*: ordinal not in"
-                                       " range\(128\)"):
+            with self.assertRaisesRegex(UnicodeEncodeError,
+                                        "'ascii' codec can't encode character"
+                                        ".*in position.*: ordinal not in"
+                                        " range\(128\)"):
                 seq._munge_to_bytestring(input_, 'dummy_method')
 
 
@@ -2238,16 +2222,16 @@ class TestDistance(TestSequenceBase):
         seq1 = SequenceSubclass("abcdef")
         seq2 = Sequence("12bcef")
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'SequenceSubclass.*Sequence.*`distance`'):
+        with self.assertRaisesRegex(TypeError,
+                                    'SequenceSubclass.*Sequence.*`distance`'):
             seq1.distance(seq2)
 
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Sequence.*SequenceSubclass.*`distance`'):
+        with self.assertRaisesRegex(TypeError,
+                                    'Sequence.*SequenceSubclass.*`distance`'):
             seq2.distance(seq1)
 
     def test_munging_invalid_characters_to_self_type(self):
-        with six.assertRaisesRegex(self, ValueError, 'Invalid characters.*X'):
+        with self.assertRaisesRegex(ValueError, 'Invalid characters.*X'):
             DNA("ACGT").distance("WXYZ")
 
     def test_munging_invalid_type_to_self_type(self):
@@ -2266,7 +2250,7 @@ class TestDistance(TestSequenceBase):
         def metric(a, b):
             return 'too far'
 
-        with six.assertRaisesRegex(self, ValueError, 'string.*float'):
+        with self.assertRaisesRegex(ValueError, 'string.*float'):
             Sequence('abc').distance('cba', metric=metric)
 
     def test_arbitrary_metric(self):
@@ -2348,9 +2332,9 @@ class TestDistance(TestSequenceBase):
 #
 # these doctests exercise the correct formatting of Sequence's repr in a
 # variety of situations. they are more extensive than the unit tests above
-# (TestSequence.test_repr) but are only currently run in py3. thus, they cannot
-# be relied upon for coverage (the unit tests take care of this)
-class SequenceReprDoctests(object):
+# (TestSequence.test_repr) but cannot be relied upon for coverage (the unit
+# tests take care of this)
+class SequenceReprDoctests:
     r"""
     >>> import pandas as pd
     >>> from skbio import Sequence
diff --git a/skbio/stats/__init__.py b/skbio/stats/__init__.py
index 1b5809f..922fd0b 100644
--- a/skbio/stats/__init__.py
+++ b/skbio/stats/__init__.py
@@ -39,8 +39,6 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._subsample import subsample_counts, isubsample
diff --git a/skbio/stats/__subsample.c b/skbio/stats/__subsample.c
index 73505ab..920ed40 100644
--- a/skbio/stats/__subsample.c
+++ b/skbio/stats/__subsample.c
@@ -230,8 +230,8 @@ static CYTHON_INLINE float __PYX_NAN() {
   #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
   #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
 #else
-  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
-  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
+  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_Divide(x,y)
+  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceDivide(x,y)
 #endif
 
 #ifndef __PYX_EXTERN_C
@@ -1225,7 +1225,7 @@ static PyObject *__pyx_tuple__6;
 static PyObject *__pyx_tuple__7;
 static PyObject *__pyx_codeobj__8;
 
-/* "skbio/stats/__subsample.pyx":15
+/* "skbio/stats/__subsample.pyx":13
  * 
  * 
  * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
@@ -1267,16 +1267,16 @@ static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without
         case  1:
         if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
         case  2:
         if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts_sum)) != 0)) kw_args--;
         else {
-          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+          __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
         }
       }
       if (unlikely(kw_args > 0)) {
-        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_subsample_counts_without_replacement") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_subsample_counts_without_replacement") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
       goto __pyx_L5_argtuple_error;
@@ -1291,13 +1291,13 @@ static PyObject *__pyx_pw_5skbio_5stats_11__subsample_1_subsample_counts_without
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("_subsample_counts_without_replacement", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("skbio.stats.__subsample._subsample_counts_without_replacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
   return NULL;
   __pyx_L4_argument_unpacking_done:;
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_counts), __pyx_ptype_5numpy_ndarray, 1, "counts", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_counts), __pyx_ptype_5numpy_ndarray, 1, "counts", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_r = __pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_replacement(__pyx_self, __pyx_v_counts, __pyx_v_n, __pyx_v_counts_sum);
 
   /* function exit code */
@@ -1367,36 +1367,36 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   __pyx_pybuffernd_counts.rcbuffer = &__pyx_pybuffer_counts;
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts.rcbuffer->pybuffer, (PyObject*)__pyx_v_counts, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_counts.rcbuffer->pybuffer, (PyObject*)__pyx_v_counts, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_pybuffernd_counts.diminfo[0].strides = __pyx_pybuffernd_counts.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_counts.diminfo[0].shape = __pyx_pybuffernd_counts.rcbuffer->pybuffer.shape[0];
 
-  /* "skbio/stats/__subsample.pyx":22
+  /* "skbio/stats/__subsample.pyx":20
  *         Py_ssize_t unpacked_idx, i, j
  * 
  *     unpacked = np.empty(counts_sum, dtype=int)             # <<<<<<<<<<<<<<
  *     unpacked_idx = 0
  *     for i in range(counts.shape[0]):
  */
-  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_v_counts_sum);
   __Pyx_GIVEREF(__pyx_v_counts_sum);
   PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_counts_sum);
-  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -1412,13 +1412,13 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
       }
     }
     __pyx_pybuffernd_unpacked.diminfo[0].strides = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_unpacked.diminfo[0].shape = __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_5 = 0;
   __pyx_v_unpacked = ((PyArrayObject *)__pyx_t_4);
   __pyx_t_4 = 0;
 
-  /* "skbio/stats/__subsample.pyx":23
+  /* "skbio/stats/__subsample.pyx":21
  * 
  *     unpacked = np.empty(counts_sum, dtype=int)
  *     unpacked_idx = 0             # <<<<<<<<<<<<<<
@@ -1427,7 +1427,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
  */
   __pyx_v_unpacked_idx = 0;
 
-  /* "skbio/stats/__subsample.pyx":24
+  /* "skbio/stats/__subsample.pyx":22
  *     unpacked = np.empty(counts_sum, dtype=int)
  *     unpacked_idx = 0
  *     for i in range(counts.shape[0]):             # <<<<<<<<<<<<<<
@@ -1438,7 +1438,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
     __pyx_v_i = __pyx_t_11;
 
-    /* "skbio/stats/__subsample.pyx":25
+    /* "skbio/stats/__subsample.pyx":23
  *     unpacked_idx = 0
  *     for i in range(counts.shape[0]):
  *         cnt = counts[i]             # <<<<<<<<<<<<<<
@@ -1453,11 +1453,11 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_counts.diminfo[0].shape)) __pyx_t_6 = 0;
     if (unlikely(__pyx_t_6 != -1)) {
       __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __pyx_v_cnt = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_counts.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_counts.diminfo[0].strides));
 
-    /* "skbio/stats/__subsample.pyx":26
+    /* "skbio/stats/__subsample.pyx":24
  *     for i in range(counts.shape[0]):
  *         cnt = counts[i]
  *         for j in range(cnt):             # <<<<<<<<<<<<<<
@@ -1468,7 +1468,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
       __pyx_v_j = __pyx_t_14;
 
-      /* "skbio/stats/__subsample.pyx":27
+      /* "skbio/stats/__subsample.pyx":25
  *         cnt = counts[i]
  *         for j in range(cnt):
  *             unpacked[unpacked_idx] = i             # <<<<<<<<<<<<<<
@@ -1483,11 +1483,11 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
       } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_unpacked.diminfo[0].shape)) __pyx_t_6 = 0;
       if (unlikely(__pyx_t_6 != -1)) {
         __Pyx_RaiseBufferIndexError(__pyx_t_6);
-        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
       }
       *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_unpacked.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_unpacked.diminfo[0].strides) = __pyx_v_i;
 
-      /* "skbio/stats/__subsample.pyx":28
+      /* "skbio/stats/__subsample.pyx":26
  *         for j in range(cnt):
  *             unpacked[unpacked_idx] = i
  *             unpacked_idx += 1             # <<<<<<<<<<<<<<
@@ -1498,19 +1498,19 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     }
   }
 
-  /* "skbio/stats/__subsample.pyx":30
+  /* "skbio/stats/__subsample.pyx":28
  *             unpacked_idx += 1
  * 
  *     permuted = np.random.permutation(unpacked)[:n]             # <<<<<<<<<<<<<<
  * 
  *     result = np.zeros_like(counts)
  */
-  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
-  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_permutation); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_permutation); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_t_1 = NULL;
@@ -1524,24 +1524,24 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     }
   }
   if (!__pyx_t_1) {
-    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_unpacked)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_unpacked)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
   } else {
-    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
     __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = NULL;
     __Pyx_INCREF(((PyObject *)__pyx_v_unpacked));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_unpacked));
     PyTuple_SET_ITEM(__pyx_t_2, 0+1, ((PyObject *)__pyx_v_unpacked));
-    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_4);
     __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   }
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_4, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_4, 0, 0, NULL, &__pyx_v_n, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -1557,22 +1557,22 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
       }
     }
     __pyx_pybuffernd_permuted.diminfo[0].strides = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_permuted.diminfo[0].shape = __pyx_pybuffernd_permuted.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_5 = 0;
   __pyx_v_permuted = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "skbio/stats/__subsample.pyx":32
+  /* "skbio/stats/__subsample.pyx":30
  *     permuted = np.random.permutation(unpacked)[:n]
  * 
  *     result = np.zeros_like(counts)             # <<<<<<<<<<<<<<
  *     for idx in range(permuted.shape[0]):
  *         result[permuted[idx]] += 1
  */
-  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_4);
-  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
   __pyx_t_4 = NULL;
@@ -1586,21 +1586,21 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     }
   }
   if (!__pyx_t_4) {
-    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_counts)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_counts)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
   } else {
-    __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
     __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); __pyx_t_4 = NULL;
     __Pyx_INCREF(((PyObject *)__pyx_v_counts));
     __Pyx_GIVEREF(((PyObject *)__pyx_v_counts));
     PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_counts));
-    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_3);
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_t_5 = ((PyArrayObject *)__pyx_t_3);
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
@@ -1616,13 +1616,13 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
       }
     }
     __pyx_pybuffernd_result.diminfo[0].strides = __pyx_pybuffernd_result.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_result.diminfo[0].shape = __pyx_pybuffernd_result.rcbuffer->pybuffer.shape[0];
-    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_t_5 = 0;
   __pyx_v_result = ((PyArrayObject *)__pyx_t_3);
   __pyx_t_3 = 0;
 
-  /* "skbio/stats/__subsample.pyx":33
+  /* "skbio/stats/__subsample.pyx":31
  * 
  *     result = np.zeros_like(counts)
  *     for idx in range(permuted.shape[0]):             # <<<<<<<<<<<<<<
@@ -1633,7 +1633,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_10; __pyx_t_16+=1) {
     __pyx_v_idx = __pyx_t_16;
 
-    /* "skbio/stats/__subsample.pyx":34
+    /* "skbio/stats/__subsample.pyx":32
  *     result = np.zeros_like(counts)
  *     for idx in range(permuted.shape[0]):
  *         result[permuted[idx]] += 1             # <<<<<<<<<<<<<<
@@ -1648,7 +1648,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_permuted.diminfo[0].shape)) __pyx_t_6 = 0;
     if (unlikely(__pyx_t_6 != -1)) {
       __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     __pyx_t_13 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_permuted.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_permuted.diminfo[0].strides));
     __pyx_t_6 = -1;
@@ -1658,12 +1658,12 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
     } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_result.diminfo[0].shape)) __pyx_t_6 = 0;
     if (unlikely(__pyx_t_6 != -1)) {
       __Pyx_RaiseBufferIndexError(__pyx_t_6);
-      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     }
     *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_result.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_result.diminfo[0].strides) += 1;
   }
 
-  /* "skbio/stats/__subsample.pyx":36
+  /* "skbio/stats/__subsample.pyx":34
  *         result[permuted[idx]] += 1
  * 
  *     return result             # <<<<<<<<<<<<<<
@@ -1673,7 +1673,7 @@ static PyObject *__pyx_pf_5skbio_5stats_11__subsample__subsample_counts_without_
   __pyx_r = ((PyObject *)__pyx_v_result);
   goto __pyx_L0;
 
-  /* "skbio/stats/__subsample.pyx":15
+  /* "skbio/stats/__subsample.pyx":13
  * 
  * 
  * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
@@ -3913,7 +3913,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
   {0, 0, 0, 0, 0, 0, 0}
 };
 static int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;
@@ -3991,17 +3991,17 @@ static int __Pyx_InitCachedConstants(void) {
   __Pyx_GOTREF(__pyx_tuple__6);
   __Pyx_GIVEREF(__pyx_tuple__6);
 
-  /* "skbio/stats/__subsample.pyx":15
+  /* "skbio/stats/__subsample.pyx":13
  * 
  * 
  * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
  *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
  *     cdef:
  */
-  __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_tuple__7 = PyTuple_Pack(11, __pyx_n_s_counts, __pyx_n_s_n, __pyx_n_s_counts_sum, __pyx_n_s_result, __pyx_n_s_permuted, __pyx_n_s_unpacked, __pyx_n_s_cnt, __pyx_n_s_unpacked_idx, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_idx); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_tuple__7);
   __Pyx_GIVEREF(__pyx_tuple__7);
-  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_subsample_counts_without_replac, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_Users_jairideout_dev_scikit_bio, __pyx_n_s_subsample_counts_without_replac, 13, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_RefNannyFinishContext();
   return 0;
   __pyx_L1_error:;
@@ -4122,28 +4122,28 @@ PyMODINIT_FUNC PyInit___subsample(void)
   if (__Pyx_patch_abc() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   #endif
 
-  /* "skbio/stats/__subsample.pyx":11
- * from __future__ import absolute_import, division, print_function
+  /* "skbio/stats/__subsample.pyx":9
+ * # ----------------------------------------------------------------------------
  * 
  * import numpy as np             # <<<<<<<<<<<<<<
  * cimport numpy as cnp
  * 
  */
-  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "skbio/stats/__subsample.pyx":15
+  /* "skbio/stats/__subsample.pyx":13
  * 
  * 
  * def _subsample_counts_without_replacement(             # <<<<<<<<<<<<<<
  *     cnp.ndarray[cnp.int64_t, ndim=1] counts, n, counts_sum):
  *     cdef:
  */
-  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, NULL, __pyx_n_s_skbio_stats___subsample); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5skbio_5stats_11__subsample_1_subsample_counts_without_replacement, NULL, __pyx_n_s_skbio_stats___subsample); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  if (PyDict_SetItem(__pyx_d, __pyx_n_s_subsample_counts_without_replac, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_subsample_counts_without_replac, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
   /* "skbio/stats/__subsample.pyx":1
diff --git a/skbio/stats/__subsample.pyx b/skbio/stats/__subsample.pyx
index 4a04943..b9e7827 100644
--- a/skbio/stats/__subsample.pyx
+++ b/skbio/stats/__subsample.pyx
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 cimport numpy as cnp
 
diff --git a/skbio/stats/_misc.py b/skbio/stats/_misc.py
index 7ef43b3..93218fe 100644
--- a/skbio/stats/_misc.py
+++ b/skbio/stats/_misc.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 def _pprint_strs(strs, max_chars=80, delimiter=', ', suffix='...',):
     """Pretty-print an iterable of strings, truncating if necessary."""
diff --git a/skbio/stats/_subsample.py b/skbio/stats/_subsample.py
index ba27f4c..8d24e7a 100644
--- a/skbio/stats/_subsample.py
+++ b/skbio/stats/_subsample.py
@@ -6,23 +6,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import viewitems
-
 import sys
-from warnings import warn
 from heapq import heappush, heappop
 from collections import defaultdict
 from copy import copy
 
 import numpy as np
 
-from skbio.util import EfficiencyWarning
-try:
-    from .__subsample import _subsample_counts_without_replacement
-except ImportError:
-    pass
 from skbio.util._decorator import experimental
+from .__subsample import _subsample_counts_without_replacement
 
 
 @experimental(as_of="0.4.0")
@@ -147,7 +139,7 @@ def isubsample(items, maximum, minimum=1, buf_size=1000, bin_f=None):
             heappop(heap)
 
     # yield items
-    for bin_, heap in viewitems(result):
+    for bin_, heap in result.items():
         if len(heap) < minimum:
             continue
 
@@ -250,20 +242,6 @@ def subsample_counts(counts, n, replace=False):
         if counts_sum == n:
             result = counts
         else:
-            try:
-                result = _subsample_counts_without_replacement(counts, n,
-                                                               counts_sum)
-            except NameError:
-                warn("Accelerated subsampling without replacement isn't"
-                     " available.", EfficiencyWarning)
-
-                nz = counts.nonzero()[0]
-                unpacked = np.concatenate([np.repeat(np.array(i,), counts[i])
-                                           for i in nz])
-                permuted = np.random.permutation(unpacked)[:n]
-
-                result = np.zeros(len(counts), dtype=int)
-                for p in permuted:
-                    result[p] += 1
-
+            result = _subsample_counts_without_replacement(counts, n,
+                                                           counts_sum)
     return result
diff --git a/skbio/stats/composition.py b/skbio/stats/composition.py
index ba5bde4..326216b 100644
--- a/skbio/stats/composition.py
+++ b/skbio/stats/composition.py
@@ -99,10 +99,10 @@ array([ 0.25,  0.25,  0.5 ])
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 import numpy as np
 import pandas as pd
 import scipy.stats
+import skbio.util
 from skbio.util._decorator import experimental
 
 
@@ -644,15 +644,16 @@ def ancom(table, grouping,
           alpha=0.05,
           tau=0.02,
           theta=0.1,
-          multiple_comparisons_correction=None,
-          significance_test=None):
+          multiple_comparisons_correction='holm-bonferroni',
+          significance_test=None,
+          percentiles=(0.0, 25.0, 50.0, 75.0, 100.0)):
     r""" Performs a differential abundance test using ANCOM.
 
     This is done by calculating pairwise log ratios between all features
     and performing a significance test to determine if there is a significant
     difference in feature ratios with respect to the variable of interest.
 
-    In an experiment with only two treatments, this test tests the following
+    In an experiment with only two treatments, this tests the following
     hypothesis for feature :math:`i`
 
     .. math::
@@ -697,6 +698,10 @@ def ancom(table, grouping,
         classes.  This function must be able to accept at least two 1D
         array_like arguments of floats and returns a test statistic and a
         p-value. By default ``scipy.stats.f_oneway`` is used.
+    percentiles : iterable of floats, optional
+        Percentile abundances to return for each feature in each group. By
+        default, will return the minimum, 25th percentile, median, 75th
+        percentile, and maximum abundances for each feature in each group.
 
     Returns
     -------
@@ -707,7 +712,15 @@ def ancom(table, grouping,
         `"W"` is the W-statistic, or number of features that a single feature
         is tested to be significantly different against.
 
-        `"reject"` indicates if feature is significantly different or not.
+        `"Reject null hypothesis"` indicates if feature is differentially
+        abundant across groups (`True`) or not (`False`).
+
+    pd.DataFrame
+        A table of features and their percentile abundances in each group. If
+        ``percentiles`` is empty, this will be an empty ``pd.DataFrame``. The
+        rows in this object will be features, and the columns will be a
+        multi-index where the first index is the percentile, and the second
+        index is the group.
 
     See Also
     --------
@@ -720,22 +733,24 @@ def ancom(table, grouping,
     Notes
     -----
     The developers of this method recommend the following significance tests
-    ([2]_, Supplementary File 1, top of page 11): the standard parametric
-    t-test (``scipy.stats.ttest_ind``) or one-way ANOVA
-    (``scipy.stats.f_oneway``) if the number of groups is greater
-    than 2, or non-parametric variants such as Wilcoxon rank sum
-    (``scipy.stats.wilcoxon``) or Kruskal-Wallis (``scipy.stats.kruskal``)
-    if the number of groups is greater than 2.  Because one-way ANOVA is
-    equivalent to the standard t-test when the number of groups is two,
-    we default to ``scipy.stats.f_oneway`` here, which can be used when
-    there are two or more groups.  Users should refer to the documentation
-    of these tests in SciPy to understand the assumptions made by each test.
+    ([2]_, Supplementary File 1, top of page 11): if there are 2 groups, use
+    the standard parametric t-test (``scipy.stats.ttest_ind``) or
+    non-parametric Wilcoxon rank sum test (``scipy.stats.wilcoxon``).
+    If there are more than 2 groups, use parametric one-way ANOVA
+    (``scipy.stats.f_oneway``) or nonparametric Kruskal-Wallis
+    (``scipy.stats.kruskal``). Because one-way ANOVA is equivalent
+    to the standard t-test when the number of groups is two, we default to
+    ``scipy.stats.f_oneway`` here, which can be used when there are two or
+    more groups.  Users should refer to the documentation of these tests in
+    SciPy to understand the assumptions made by each test.
 
     This method cannot handle any zero counts as input, since the logarithm
     of zero cannot be computed.  While this is an unsolved problem, many
-    studies have shown promising results by replacing the zeros with pseudo
-    counts. This can be also be done via the ``multiplicative_replacement``
-    method.
+    studies, including [2]_, have shown promising results by adding
+    pseudocounts to all values in the matrix. In [2]_, a pseudocount of 0.001
+    was used, though the authors note that a pseudocount of 1.0 may also be
+    useful. Zero counts can also be addressed using the
+    ``multiplicative_replacement`` method.
 
     References
     ----------
@@ -752,7 +767,8 @@ def ancom(table, grouping,
     >>> from skbio.stats.composition import ancom
     >>> import pandas as pd
 
-    Now let's load in a pd.DataFrame with 6 samples and 7 unknown bacteria:
+    Now let's load in a DataFrame with 6 samples and 7 features (e.g.,
+    these may be bacterial OTUs):
 
     >>> table = pd.DataFrame([[12, 11, 10, 10, 10, 10, 10],
     ...                       [9,  11, 12, 10, 10, 10, 10],
@@ -760,25 +776,28 @@ def ancom(table, grouping,
     ...                       [22, 21, 9,  10, 10, 10, 10],
     ...                       [20, 22, 10, 10, 13, 10, 10],
     ...                       [23, 21, 14, 10, 10, 10, 10]],
-    ...                      index=['s1','s2','s3','s4','s5','s6'],
-    ...                      columns=['b1','b2','b3','b4','b5','b6','b7'])
+    ...                      index=['s1', 's2', 's3', 's4', 's5', 's6'],
+    ...                      columns=['b1', 'b2', 'b3', 'b4', 'b5', 'b6',
+    ...                               'b7'])
 
-    Then create a grouping vector.  In this scenario, there
-    are only two classes, and suppose these classes correspond to the
-    treatment due to a drug and a control.  The first three samples
-    are controls and the last three samples are treatments.
+    Then create a grouping vector. In this example, there is a treatment group
+    and a placebo group.
 
-    >>> grouping = pd.Series([0, 0, 0, 1, 1, 1],
-    ...                      index=['s1','s2','s3','s4','s5','s6'])
+    >>> grouping = pd.Series(['treatment', 'treatment', 'treatment',
+    ...                       'placebo', 'placebo', 'placebo'],
+    ...                      index=['s1', 's2', 's3', 's4', 's5', 's6'])
 
-    Now run ``ancom`` and see if there are any features that have any
-    significant differences between the treatment and the control.
+    Now run ``ancom`` to determine if there are any features that are
+    significantly different in abundance between the treatment and the placebo
+    groups. The first DataFrame that is returned contains the ANCOM test
+    results, and the second contains the percentile abundance data for each
+    feature in each group.
 
-    >>> results = ancom(table, grouping)
-    >>> results['W']
+    >>> ancom_df, percentile_df = ancom(table, grouping)
+    >>> ancom_df['W']
     b1    0
     b2    4
-    b3    1
+    b3    0
     b4    1
     b5    1
     b6    0
@@ -788,10 +807,13 @@ def ancom(table, grouping,
     The W-statistic is the number of features that a single feature is tested
     to be significantly different against.  In this scenario, `b2` was detected
     to have significantly different abundances compared to four of the other
-    species. To summarize the results from the W-statistic, let's take a look
-    at the results from the hypothesis test:
+    features. To summarize the results from the W-statistic, let's take a look
+    at the results from the hypothesis test. The `Reject null hypothesis`
+    column in the table indicates whether the null hypothesis was rejected,
+    and that a feature was therefore observed to be differentially abundant
+    across the groups.
 
-    >>> results['reject']
+    >>> ancom_df['Reject null hypothesis']
     b1    False
     b2     True
     b3    False
@@ -799,13 +821,44 @@ def ancom(table, grouping,
     b5    False
     b6    False
     b7    False
-    Name: reject, dtype: bool
-
-    From this we can conclude that only `b2` was significantly
-    different between the treatment and the control.
+    Name: Reject null hypothesis, dtype: bool
+
+    From this we can conclude that only `b2` was significantly different in
+    abundance between the treatment and the placebo. We still don't know, for
+    example, in which group `b2` was more abundant. We therefore may next be
+    interested in comparing the abundance of `b2` across the two groups.
+    We can do that using the second DataFrame that was returned. Here we
+    compare the median (50th percentile) abundance of `b2` in the treatment and
+    placebo groups:
+
+    >>> percentile_df[50.0].loc['b2']
+    Group
+    placebo      21.0
+    treatment    11.0
+    Name: b2, dtype: float64
+
+    We can also look at a full five-number summary for ``b2`` in the treatment
+    and placebo groups:
+
+    >>> percentile_df.loc['b2'] # doctest: +NORMALIZE_WHITESPACE
+    Percentile  Group
+    0.0         placebo      21.0
+    25.0        placebo      21.0
+    50.0        placebo      21.0
+    75.0        placebo      21.5
+    100.0       placebo      22.0
+    0.0         treatment    11.0
+    25.0        treatment    11.0
+    50.0        treatment    11.0
+    75.0        treatment    11.0
+    100.0       treatment    11.0
+    Name: b2, dtype: float64
+
+    Taken together, these data tell us that `b2` is present in significantly
+    higher abundance in the placebo group samples than in the treatment group
+    samples.
 
     """
-
     if not isinstance(table, pd.DataFrame):
         raise TypeError('`table` must be a `pd.DataFrame`, '
                         'not %r.' % type(table).__name__)
@@ -815,7 +868,7 @@ def ancom(table, grouping,
 
     if np.any(table <= 0):
         raise ValueError('Cannot handle zeros or negative values in `table`. '
-                         'Use pseudo counts or ``multiplicative_replacement``.'
+                         'Use pseudocounts or ``multiplicative_replacement``.'
                          )
 
     if not 0 < alpha < 1:
@@ -839,8 +892,20 @@ def ancom(table, grouping,
     if (table.isnull()).any().any():
         raise ValueError('Cannot handle missing values in `table`.')
 
-    groups, _grouping = np.unique(grouping, return_inverse=True)
-    grouping = pd.Series(_grouping, index=grouping.index)
+    percentiles = list(percentiles)
+    for percentile in percentiles:
+        if not 0.0 <= percentile <= 100.0:
+            raise ValueError('Percentiles must be in the range [0, 100], %r '
+                             'was provided.' % percentile)
+
+    duplicates = skbio.util.find_duplicates(percentiles)
+    if duplicates:
+        formatted_duplicates = ', '.join(repr(e) for e in duplicates)
+        raise ValueError('Percentile values must be unique. The following'
+                         ' value(s) were duplicated: %s.' %
+                         formatted_duplicates)
+
+    groups = np.unique(grouping)
     num_groups = len(groups)
 
     if num_groups == len(grouping):
@@ -897,9 +962,27 @@ def ancom(table, grouping,
         else:
             nu = cutoff[4]
         reject = (W >= nu*n_feat)
-    labs = mat.columns
-    return pd.DataFrame({'W': pd.Series(W, index=labs),
-                         'reject': pd.Series(reject, index=labs)})
+
+    feat_ids = mat.columns
+    ancom_df = pd.DataFrame(
+        {'W': pd.Series(W, index=feat_ids),
+         'Reject null hypothesis': pd.Series(reject, index=feat_ids)})
+
+    if len(percentiles) == 0:
+        return ancom_df, pd.DataFrame()
+    else:
+        data = []
+        columns = []
+        for group in groups:
+            feat_dists = mat[cats == group]
+            for percentile in percentiles:
+                columns.append((percentile, group))
+                data.append(np.percentile(feat_dists, percentile, axis=0))
+        columns = pd.MultiIndex.from_tuples(columns,
+                                            names=['Percentile', 'Group'])
+        percentile_df = pd.DataFrame(
+            np.asarray(data).T, columns=columns, index=feat_ids)
+        return ancom_df, percentile_df
 
 
 def _holm_bonferroni(p):
diff --git a/skbio/stats/distance/__init__.py b/skbio/stats/distance/__init__.py
index 1f68724..78bdcc0 100644
--- a/skbio/stats/distance/__init__.py
+++ b/skbio/stats/distance/__init__.py
@@ -187,8 +187,6 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._base import (DissimilarityMatrixError, DistanceMatrixError,
diff --git a/skbio/stats/distance/_anosim.py b/skbio/stats/distance/_anosim.py
index 93d8961..a37d35e 100644
--- a/skbio/stats/distance/_anosim.py
+++ b/skbio/stats/distance/_anosim.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from functools import partial
 
 import numpy as np
diff --git a/skbio/stats/distance/_base.py b/skbio/stats/distance/_base.py
index b1ecf22..7281b26 100644
--- a/skbio/stats/distance/_base.py
+++ b/skbio/stats/distance/_base.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import string_types
-
 from copy import deepcopy
 
 import matplotlib.pyplot as plt
@@ -431,10 +428,10 @@ class DissimilarityMatrix(SkbioObject):
         ...                      [2, 3, 0]], ids=['a', 'b', 'c'])
         >>> df = dm.to_data_frame()
         >>> df
-           a  b  c
-        a  0  1  2
-        b  1  0  3
-        c  2  3  0
+             a    b    c
+        a  0.0  1.0  2.0
+        b  1.0  0.0  3.0
+        c  2.0  3.0  0.0
 
         """
         return pd.DataFrame(data=self.data, index=self.ids, columns=self.ids)
@@ -588,7 +585,7 @@ class DissimilarityMatrix(SkbioObject):
         The lookup based on ID(s) is quick.
 
         """
-        if isinstance(index, string_types):
+        if isinstance(index, str):
             return self.data[self.index(index)]
         elif self._is_id_pair(index):
             return self.data[self.index(index[0]), self.index(index[1])]
@@ -647,7 +644,7 @@ class DissimilarityMatrix(SkbioObject):
     def _is_id_pair(self, index):
         return (isinstance(index, tuple) and
                 len(index) == 2 and
-                all(map(lambda e: isinstance(e, string_types), index)))
+                all(map(lambda e: isinstance(e, str), index)))
 
 
 class DistanceMatrix(DissimilarityMatrix):
diff --git a/skbio/stats/distance/_bioenv.py b/skbio/stats/distance/_bioenv.py
index b4288f7..0eb2882 100644
--- a/skbio/stats/distance/_bioenv.py
+++ b/skbio/stats/distance/_bioenv.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from itertools import combinations
 
 import numpy as np
diff --git a/skbio/stats/distance/_mantel.py b/skbio/stats/distance/_mantel.py
index 8e2d3f6..ac308c4 100644
--- a/skbio/stats/distance/_mantel.py
+++ b/skbio/stats/distance/_mantel.py
@@ -6,12 +6,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip
-
 from itertools import combinations
 
-import six
 import numpy as np
 import pandas as pd
 import scipy.misc
@@ -410,9 +406,9 @@ def pwmantel(dms, labels=None, method='pearson', permutations=999,
 
     for i, pair in enumerate(combinations(zip(labels, dms), 2)):
         (xlabel, x), (ylabel, y) = pair
-        if isinstance(x, six.string_types):
+        if isinstance(x, str):
             x = DistanceMatrix.read(x)
-        if isinstance(y, six.string_types):
+        if isinstance(y, str):
             y = DistanceMatrix.read(y)
 
         stat, p_val, n = mantel(x, y, method=method, permutations=permutations,
diff --git a/skbio/stats/distance/_permanova.py b/skbio/stats/distance/_permanova.py
index 95d27ad..32c8097 100644
--- a/skbio/stats/distance/_permanova.py
+++ b/skbio/stats/distance/_permanova.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import range
-
 from functools import partial
 
 import numpy as np
diff --git a/skbio/stats/distance/tests/__init__.py b/skbio/stats/distance/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/stats/distance/tests/__init__.py
+++ b/skbio/stats/distance/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/distance/tests/test_anosim.py b/skbio/stats/distance/tests/test_anosim.py
index 2f671bb..f722eb5 100644
--- a/skbio/stats/distance/tests/test_anosim.py
+++ b/skbio/stats/distance/tests/test_anosim.py
@@ -6,9 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import StringIO
-
+import io
 from functools import partial
 from unittest import TestCase, main
 
@@ -29,8 +27,8 @@ class TestANOSIM(TestCase):
         dm_ids = ['s1', 's2', 's3', 's4']
         self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
         self.df = pd.read_csv(
-            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
-                     's1,Control'), index_col=0)
+            io.StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
+                        's1,Control'), index_col=0)
 
         self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                        [1, 0, 3, 2],
diff --git a/skbio/stats/distance/tests/test_base.py b/skbio/stats/distance/tests/test_base.py
index 71e7bde..f0a156b 100644
--- a/skbio/stats/distance/tests/test_base.py
+++ b/skbio/stats/distance/tests/test_base.py
@@ -6,13 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import zip
-from six import StringIO, binary_type, text_type
-
+import io
 from unittest import TestCase, main
 
-import six
 import matplotlib as mpl
 import numpy as np
 import numpy.testing as npt
@@ -318,15 +314,12 @@ class DissimilarityMatrixTests(DissimilarityMatrixTestData):
     def test_repr_png(self):
         dm = self.dm_1x1
         obs = dm._repr_png_()
-        self.assertIsInstance(obs, binary_type)
+        self.assertIsInstance(obs, bytes)
         self.assertTrue(len(obs) > 0)
 
     def test_repr_svg(self):
         obs = self.dm_1x1._repr_svg_()
-        # print_figure(format='svg') can return text or bytes depending on the
-        # version of IPython
-        self.assertTrue(isinstance(obs, text_type) or
-                        isinstance(obs, binary_type))
+        self.assertIsInstance(obs, str)
         self.assertTrue(len(obs) > 0)
 
     def test_png(self):
@@ -493,7 +486,7 @@ class DistanceMatrixTests(DissimilarityMatrixTestData):
             DistanceMatrix([[1, 2, 3]], ['a'])
 
     def test_init_nans(self):
-        with six.assertRaisesRegex(self, DistanceMatrixError, 'NaNs'):
+        with self.assertRaisesRegex(DistanceMatrixError, 'NaNs'):
             DistanceMatrix([[0.0, np.nan], [np.nan, 0.0]], ['a', 'b'])
 
     def test_from_iterable_no_key(self):
@@ -719,10 +712,10 @@ class CategoricalStatsHelperFunctionTests(TestCase):
         self.grouping = [1, 2, 1]
         # Ordering of IDs shouldn't matter, nor should extra IDs.
         self.df = pd.read_csv(
-            StringIO('ID,Group\nb,Group2\na,Group1\nc,Group1\nd,Group3'),
+            io.StringIO('ID,Group\nb,Group2\na,Group1\nc,Group1\nd,Group3'),
             index_col=0)
         self.df_missing_id = pd.read_csv(
-            StringIO('ID,Group\nb,Group2\nc,Group1'), index_col=0)
+            io.StringIO('ID,Group\nb,Group2\nc,Group1'), index_col=0)
 
     def test_preprocess_input_with_valid_input(self):
         # Should obtain same result using grouping vector or data frame.
diff --git a/skbio/stats/distance/tests/test_bioenv.py b/skbio/stats/distance/tests/test_bioenv.py
index 54a2c33..aedd99d 100644
--- a/skbio/stats/distance/tests/test_bioenv.py
+++ b/skbio/stats/distance/tests/test_bioenv.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/stats/distance/tests/test_mantel.py b/skbio/stats/distance/tests/test_mantel.py
index f6c8173..0d14317 100644
--- a/skbio/stats/distance/tests/test_mantel.py
+++ b/skbio/stats/distance/tests/test_mantel.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-
 from unittest import TestCase, main
 
 import numpy as np
@@ -543,7 +540,7 @@ class OrderDistanceMatricesTests(MantelTestData):
         # for the first distance matrix.
         lookup = {'0': 'a', '2': 'c'}
 
-        with six.assertRaisesRegex(self, KeyError, "first.*(x).*'1'\"$"):
+        with self.assertRaisesRegex(KeyError, "first.*(x).*'1'\"$"):
             _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
 
         # Mapping for 'bar' is missing. Should get an error while remapping IDs
@@ -552,7 +549,7 @@ class OrderDistanceMatricesTests(MantelTestData):
                   'foo': 'a', 'baz': 'c'}
         self.miny_dm.ids = ('foo', 'bar', 'baz')
 
-        with six.assertRaisesRegex(self, KeyError, "second.*(y).*'bar'\"$"):
+        with self.assertRaisesRegex(KeyError, "second.*(y).*'bar'\"$"):
             _order_dms(self.minx_dm, self.miny_dm, lookup=lookup)
 
     def test_nonmatching_ids_strict_true(self):
diff --git a/skbio/stats/distance/tests/test_permanova.py b/skbio/stats/distance/tests/test_permanova.py
index f15803e..1093d5f 100644
--- a/skbio/stats/distance/tests/test_permanova.py
+++ b/skbio/stats/distance/tests/test_permanova.py
@@ -6,9 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import StringIO
-
+import io
 from functools import partial
 from unittest import TestCase, main
 
@@ -29,8 +27,8 @@ class TestPERMANOVA(TestCase):
         dm_ids = ['s1', 's2', 's3', 's4']
         self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
         self.df = pd.read_csv(
-            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
-                     's1,Control'), index_col=0)
+            io.StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
+                        's1,Control'), index_col=0)
 
         self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                        [1, 0, 3, 2],
diff --git a/skbio/stats/evolve/__init__.py b/skbio/stats/evolve/__init__.py
index cbfb9cc..bffb453 100644
--- a/skbio/stats/evolve/__init__.py
+++ b/skbio/stats/evolve/__init__.py
@@ -30,8 +30,6 @@ Functions
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._hommola import hommola_cospeciation
diff --git a/skbio/stats/evolve/_hommola.py b/skbio/stats/evolve/_hommola.py
index d491de8..3263cca 100644
--- a/skbio/stats/evolve/_hommola.py
+++ b/skbio/stats/evolve/_hommola.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import range
-
 import numpy as np
 from scipy.stats import pearsonr
 
diff --git a/skbio/stats/evolve/tests/__init__.py b/skbio/stats/evolve/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/stats/evolve/tests/__init__.py
+++ b/skbio/stats/evolve/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/evolve/tests/test_hommola.py b/skbio/stats/evolve/tests/test_hommola.py
index e8a264c..5e91eb8 100644
--- a/skbio/stats/evolve/tests/test_hommola.py
+++ b/skbio/stats/evolve/tests/test_hommola.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 import unittest
 
 import numpy as np
diff --git a/skbio/stats/gradient.py b/skbio/stats/gradient.py
index 2f86a49..15004b2 100644
--- a/skbio/stats/gradient.py
+++ b/skbio/stats/gradient.py
@@ -93,8 +93,6 @@ Control
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from copy import deepcopy
 from collections import defaultdict
 from numbers import Integral
@@ -201,7 +199,7 @@ def _ANOVA_trajectories(category, res_by_group):
     return CategoryResults(category, p_val, res_by_group, None)
 
 
-class GroupResults(object):
+class GroupResults:
     """Store the trajectory results of a group of a metadata category
 
     Attributes
@@ -258,7 +256,7 @@ class GroupResults(object):
                     % ", ".join(map(str, self.trajectory)))
 
 
-class CategoryResults(object):
+class CategoryResults:
     """Store the trajectory results of a metadata category
 
     Attributes
@@ -308,7 +306,7 @@ class CategoryResults(object):
                 group.to_files(out_f, raw_f)
 
 
-class GradientANOVAResults(object):
+class GradientANOVAResults:
     """Store the trajectory results
 
     Attributes
@@ -359,7 +357,7 @@ class GradientANOVAResults(object):
             raw_f.write('\n')
 
 
-class GradientANOVA(object):
+class GradientANOVA:
     r"""Base class for the Trajectory algorithms
 
     Parameters
diff --git a/skbio/stats/ordination/__init__.py b/skbio/stats/ordination/__init__.py
index 27b067c..ef49cda 100644
--- a/skbio/stats/ordination/__init__.py
+++ b/skbio/stats/ordination/__init__.py
@@ -23,7 +23,16 @@ Functions
    corr
    scale
    svd_rank
+   e_matrix
+   f_matrix
 
+Classes
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   OrdinationResults
 
 Examples
 --------
@@ -112,17 +121,16 @@ References
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._redundancy_analysis import rda
 from ._correspondence_analysis import ca
 from ._canonical_correspondence_analysis import cca
 from ._principal_coordinate_analysis import pcoa
+from ._ordination_results import OrdinationResults
 from ._utils import (mean_and_std, scale, svd_rank, corr, e_matrix, f_matrix)
 
-__all__ = ['ca', 'rda', 'cca', 'pcoa',
+__all__ = ['ca', 'rda', 'cca', 'pcoa', 'OrdinationResults',
            'mean_and_std', 'scale', 'svd_rank', 'corr',
            'e_matrix', 'f_matrix']
 
diff --git a/skbio/stats/ordination/_canonical_correspondence_analysis.py b/skbio/stats/ordination/_canonical_correspondence_analysis.py
index 9c7a213..6c80e50 100644
--- a/skbio/stats/ordination/_canonical_correspondence_analysis.py
+++ b/skbio/stats/ordination/_canonical_correspondence_analysis.py
@@ -6,13 +6,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import pandas as pd
 from scipy.linalg import svd, lstsq
 
-from skbio._base import OrdinationResults
+from ._ordination_results import OrdinationResults
 from ._utils import corr, svd_rank, scale
 from skbio.util._decorator import experimental
 
@@ -64,9 +62,14 @@ def cca(y, x, scaling=1):
     NotImplementedError
         If scaling is not 1 or 2.
 
+    See Also
+    --------
+    ca
+    rda
+    OrdinationResults
+
     Notes
     -----
-
     The algorithm is based on [3]_, \S 11.2, and is expected to give
     the same results as ``cca(y, x)`` in R's package vegan, except
     that this implementation won't drop constraining variables due to
@@ -83,12 +86,6 @@ def cca(y, x, scaling=1):
     "environmental variables" and is not well suited to analyze
     ecological data.
 
-    See Also
-    --------
-    ca
-    rda
-    OrdinationResults
-
     References
     ----------
     .. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
@@ -223,7 +220,10 @@ def cca(y, x, scaling=1):
                            columns=pc_ids, index=sample_ids)
     features = pd.DataFrame(features_scores,
                             columns=pc_ids, index=feature_ids)
-    biplot_scores = pd.DataFrame(biplot_scores)
+
+    biplot_scores = pd.DataFrame(biplot_scores,
+                                 index=x.columns,
+                                 columns=pc_ids[:biplot_scores.shape[1]])
     sample_constraints = pd.DataFrame(sample_constraints,
                                       index=sample_ids, columns=pc_ids)
 
diff --git a/skbio/stats/ordination/_correspondence_analysis.py b/skbio/stats/ordination/_correspondence_analysis.py
index 1f1ca9c..1b5d965 100644
--- a/skbio/stats/ordination/_correspondence_analysis.py
+++ b/skbio/stats/ordination/_correspondence_analysis.py
@@ -6,13 +6,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import pandas as pd
 from scipy.linalg import svd
 
-from skbio._base import OrdinationResults
+from ._ordination_results import OrdinationResults
 from ._utils import svd_rank
 from skbio.util._decorator import experimental
 
@@ -81,15 +79,17 @@ def ca(X, scaling=1):
     ValueError
         If any of the input matrix elements are negative.
 
+    See Also
+    --------
+    cca
+    rda
+    OrdinationResults
+
     Notes
     -----
     The algorithm is based on [1]_, \S 9.4.1., and is expected to give the same
     results as ``cca(X)`` in R's package vegan.
 
-    See Also
-    --------
-    cca
-
     References
     ----------
     .. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
diff --git a/skbio/_base.py b/skbio/stats/ordination/_ordination_results.py
similarity index 55%
copy from skbio/_base.py
copy to skbio/stats/ordination/_ordination_results.py
index aac8289..5fd6a94 100644
--- a/skbio/_base.py
+++ b/skbio/stats/ordination/_ordination_results.py
@@ -6,424 +6,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import with_metaclass
-from future.builtins import zip
-
-import abc
-import copy
 import functools
 
 import numpy as np
-import pandas as pd
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 from mpl_toolkits.mplot3d import Axes3D  # noqa
 from IPython.core.pylabtools import print_figure
 from IPython.core.display import Image, SVG
 
+from skbio._base import SkbioObject
 from skbio.stats._misc import _pprint_strs
-from skbio.util._decorator import stable, experimental
-
-
-class SkbioObject(with_metaclass(abc.ABCMeta, object)):
-    """Abstract base class defining core API common to all scikit-bio objects.
-
-    Public scikit-bio classes should subclass this class to ensure a common,
-    core API is present. All abstract methods and properties defined here must
-    be implemented in subclasses, otherwise they will not be instantiable.
-
-    """
-    @abc.abstractmethod
-    def __str__(self):
-        pass
-
-
-class MetadataMixin(with_metaclass(abc.ABCMeta, object)):
-    @property
-    @stable(as_of="0.4.0")
-    def metadata(self):
-        """``dict`` containing metadata which applies to the entire object.
-
-        Notes
-        -----
-        This property can be set and deleted. When setting new metadata a
-        shallow copy of the dictionary is made.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with metadata share a common interface for
-           accessing and manipulating their metadata. The following examples
-           use scikit-bio's ``Sequence`` class to demonstrate metadata
-           behavior. These examples apply to all other scikit-bio objects
-           storing metadata.
-
-        Create a sequence with metadata:
-
-        >>> from pprint import pprint
-        >>> from skbio import Sequence
-        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id',
-        ...                                  'description': 'seq description'})
-
-        Retrieve metadata:
-
-        >>> pprint(seq.metadata) # using pprint to display dict in sorted order
-        {'description': 'seq description', 'id': 'seq-id'}
-
-        Update metadata:
-
-        >>> seq.metadata['id'] = 'new-id'
-        >>> seq.metadata['pubmed'] = 12345
-        >>> pprint(seq.metadata)
-        {'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
-
-        Set metadata:
-
-        >>> seq.metadata = {'abc': 123}
-        >>> seq.metadata
-        {'abc': 123}
-
-        Delete metadata:
-
-        >>> seq.has_metadata()
-        True
-        >>> del seq.metadata
-        >>> seq.metadata
-        {}
-        >>> seq.has_metadata()
-        False
-
-        """
-        if self._metadata is None:
-            # Not using setter to avoid copy.
-            self._metadata = {}
-        return self._metadata
-
-    @metadata.setter
-    def metadata(self, metadata):
-        if not isinstance(metadata, dict):
-            raise TypeError("metadata must be a dict")
-        # Shallow copy.
-        self._metadata = metadata.copy()
-
-    @metadata.deleter
-    def metadata(self):
-        self._metadata = None
-
-    @abc.abstractmethod
-    def __init__(self, metadata=None):
-        pass
-
-    def _init_(self, metadata=None):
-        if metadata is None:
-            self._metadata = None
-        else:
-            self.metadata = metadata
-
-    @abc.abstractmethod
-    def __eq__(self, other):
-        pass
-
-    def _eq_(self, other):
-        # We're not simply comparing self.metadata to other.metadata in order
-        # to avoid creating "empty" metadata representations on the objects if
-        # they don't have metadata.
-        if self.has_metadata() and other.has_metadata():
-            if self.metadata != other.metadata:
-                return False
-        elif not (self.has_metadata() or other.has_metadata()):
-            # Both don't have metadata.
-            pass
-        else:
-            # One has metadata while the other does not.
-            return False
-
-        return True
-
-    @abc.abstractmethod
-    def __ne__(self, other):
-        pass
-
-    def _ne_(self, other):
-        return not (self == other)
-
-    @abc.abstractmethod
-    def __copy__(self):
-        pass
-
-    def _copy_(self):
-        if self.has_metadata():
-            return self.metadata.copy()
-        else:
-            return None
-
-    @abc.abstractmethod
-    def __deepcopy__(self, memo):
-        pass
-
-    def _deepcopy_(self, memo):
-        if self.has_metadata():
-            return copy.deepcopy(self.metadata, memo)
-        else:
-            return None
-
-    @stable(as_of="0.4.0")
-    def has_metadata(self):
-        """Determine if the object has metadata.
-
-        An object has metadata if its ``metadata`` dictionary is not empty
-        (i.e., has at least one key-value pair).
-
-        Returns
-        -------
-        bool
-            Indicates whether the object has metadata.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with metadata share a common interface for
-           accessing and manipulating their metadata. The following examples
-           use scikit-bio's ``Sequence`` class to demonstrate metadata
-           behavior. These examples apply to all other scikit-bio objects
-           storing metadata.
-
-        >>> from skbio import Sequence
-        >>> seq = Sequence('ACGT')
-        >>> seq.has_metadata()
-        False
-        >>> seq = Sequence('ACGT', metadata={})
-        >>> seq.has_metadata()
-        False
-        >>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
-        >>> seq.has_metadata()
-        True
-
-        """
-        return self._metadata is not None and bool(self.metadata)
-
-
-class PositionalMetadataMixin(with_metaclass(abc.ABCMeta, object)):
-    @abc.abstractmethod
-    def _positional_metadata_axis_len_(self):
-        """Return length of axis that positional metadata applies to.
-
-        Returns
-        -------
-        int
-            Positional metadata axis length.
-
-        """
-        pass
-
-    @property
-    @stable(as_of="0.4.0")
-    def positional_metadata(self):
-        """``pd.DataFrame`` containing metadata along an axis.
-
-        Notes
-        -----
-        This property can be set and deleted. When setting new positional
-        metadata a shallow copy is made.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with positional metadata share a common
-           interface for accessing and manipulating their positional metadata.
-           The following examples use scikit-bio's ``DNA`` class to demonstrate
-           positional metadata behavior. These examples apply to all other
-           scikit-bio objects storing positional metadata.
-
-        Create a DNA sequence with positional metadata:
-
-        >>> from skbio import DNA
-        >>> seq = DNA(
-        ...     'ACGT',
-        ...     positional_metadata={'quality': [3, 3, 20, 11],
-        ...                          'exons': [True, True, False, True]})
-        >>> seq
-        DNA
-        -----------------------------
-        Positional metadata:
-            'exons': <dtype: bool>
-            'quality': <dtype: int64>
-        Stats:
-            length: 4
-            has gaps: False
-            has degenerates: False
-            has non-degenerates: True
-            GC-content: 50.00%
-        -----------------------------
-        0 ACGT
-
-        Retrieve positional metadata:
-
-        >>> seq.positional_metadata
-           exons  quality
-        0   True        3
-        1   True        3
-        2  False       20
-        3   True       11
-
-        Update positional metadata:
-
-        >>> seq.positional_metadata['gaps'] = seq.gaps()
-        >>> seq.positional_metadata
-           exons  quality   gaps
-        0   True        3  False
-        1   True        3  False
-        2  False       20  False
-        3   True       11  False
-
-        Set positional metadata:
-
-        >>> seq.positional_metadata = {'degenerates': seq.degenerates()}
-        >>> seq.positional_metadata
-          degenerates
-        0       False
-        1       False
-        2       False
-        3       False
-
-        Delete positional metadata:
-
-        >>> seq.has_positional_metadata()
-        True
-        >>> del seq.positional_metadata
-        >>> seq.positional_metadata
-        Empty DataFrame
-        Columns: []
-        Index: [0, 1, 2, 3]
-        >>> seq.has_positional_metadata()
-        False
-
-        """
-        if self._positional_metadata is None:
-            # Not using setter to avoid copy.
-            self._positional_metadata = pd.DataFrame(
-                index=np.arange(self._positional_metadata_axis_len_()))
-        return self._positional_metadata
-
-    @positional_metadata.setter
-    def positional_metadata(self, positional_metadata):
-        try:
-            # Pass copy=True to copy underlying data buffer.
-            positional_metadata = pd.DataFrame(positional_metadata, copy=True)
-        except pd.core.common.PandasError as e:
-            raise TypeError(
-                "Invalid positional metadata. Must be consumable by "
-                "`pd.DataFrame` constructor. Original pandas error message: "
-                "\"%s\"" % e)
-
-        num_rows = len(positional_metadata.index)
-        axis_len = self._positional_metadata_axis_len_()
-        if num_rows != axis_len:
-            raise ValueError(
-                "Number of positional metadata values (%d) must match the "
-                "positional metadata axis length (%d)."
-                % (num_rows, axis_len))
-
-        positional_metadata.reset_index(drop=True, inplace=True)
-        self._positional_metadata = positional_metadata
-
-    @positional_metadata.deleter
-    def positional_metadata(self):
-        self._positional_metadata = None
-
-    @abc.abstractmethod
-    def __init__(self, positional_metadata=None):
-        pass
-
-    def _init_(self, positional_metadata=None):
-        if positional_metadata is None:
-            self._positional_metadata = None
-        else:
-            self.positional_metadata = positional_metadata
-
-    @abc.abstractmethod
-    def __eq__(self, other):
-        pass
-
-    def _eq_(self, other):
-        # We're not simply comparing self.positional_metadata to
-        # other.positional_metadata in order to avoid creating "empty"
-        # positional metadata representations on the objects if they don't have
-        # positional metadata.
-        if self.has_positional_metadata() and other.has_positional_metadata():
-            if not self.positional_metadata.equals(other.positional_metadata):
-                return False
-        elif not (self.has_positional_metadata() or
-                  other.has_positional_metadata()):
-            # Both don't have positional metadata.
-            pass
-        else:
-            # One has positional metadata while the other does not.
-            return False
-
-        return True
-
-    @abc.abstractmethod
-    def __ne__(self, other):
-        pass
-
-    def _ne_(self, other):
-        return not (self == other)
-
-    @abc.abstractmethod
-    def __copy__(self):
-        pass
-
-    def _copy_(self):
-        if self.has_positional_metadata():
-            # deep=True makes a shallow copy of the underlying data buffer.
-            return self.positional_metadata.copy(deep=True)
-        else:
-            return None
-
-    @abc.abstractmethod
-    def __deepcopy__(self, memo):
-        pass
-
-    def _deepcopy_(self, memo):
-        if self.has_positional_metadata():
-            return copy.deepcopy(self.positional_metadata, memo)
-        else:
-            return None
-
-    @stable(as_of="0.4.0")
-    def has_positional_metadata(self):
-        """Determine if the object has positional metadata.
-
-        An object has positional metadata if its ``positional_metadata``
-        ``pd.DataFrame`` has at least one column.
-
-        Returns
-        -------
-        bool
-            Indicates whether the object has positional metadata.
-
-        Examples
-        --------
-        .. note:: scikit-bio objects with positional metadata share a common
-           interface for accessing and manipulating their positional metadata.
-           The following examples use scikit-bio's ``DNA`` class to demonstrate
-           positional metadata behavior. These examples apply to all other
-           scikit-bio objects storing positional metadata.
-
-        >>> import pandas as pd
-        >>> from skbio import DNA
-        >>> seq = DNA('ACGT')
-        >>> seq.has_positional_metadata()
-        False
-        >>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
-        >>> seq.has_positional_metadata()
-        False
-        >>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
-        >>> seq.has_positional_metadata()
-        True
-
-        """
-        return (self._positional_metadata is not None and
-                len(self.positional_metadata.columns) > 0)
+from skbio.util._decorator import experimental
 
 
 class OrdinationResults(SkbioObject):
@@ -658,9 +252,7 @@ class OrdinationResults(SkbioObject):
         # derived from
         # http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
         fig = plt.figure()
-        # create the axes, leaving room for a legend as described here:
-        # http://stackoverflow.com/a/9651897/3424666
-        ax = fig.add_axes([0.1, 0.1, 0.6, 0.75], projection='3d')
+        ax = fig.add_subplot(111, projection='3d')
 
         xs = coord_matrix[axes[0]]
         ys = coord_matrix[axes[1]]
@@ -695,7 +287,7 @@ class OrdinationResults(SkbioObject):
                 fig.colorbar(plot)
             else:
                 self._plot_categorical_legend(ax, category_to_color)
-
+        fig.tight_layout()
         return fig
 
     def _validate_plot_axes(self, coord_matrix, axes):
@@ -817,37 +409,3 @@ class OrdinationResults(SkbioObject):
         else:
             formatted_attr = formatter(attr)
         return '\t%s: %s' % (attr_label, formatted_attr)
-
-
-class ElasticLines(object):
-    """Store blocks of content separated by dashed lines.
-
-    Each dashed line (separator) is as long as the longest content
-    (non-separator) line.
-
-    """
-
-    def __init__(self):
-        self._lines = []
-        self._separator_idxs = []
-        self._max_line_len = -1
-
-    def add_line(self, line):
-        line_len = len(line)
-        if line_len > self._max_line_len:
-            self._max_line_len = line_len
-        self._lines.append(line)
-
-    def add_lines(self, lines):
-        for line in lines:
-            self.add_line(line)
-
-    def add_separator(self):
-        self._lines.append(None)
-        self._separator_idxs.append(len(self._lines) - 1)
-
-    def to_str(self):
-        separator = '-' * self._max_line_len
-        for idx in self._separator_idxs:
-            self._lines[idx] = separator
-        return '\n'.join(self._lines)
diff --git a/skbio/stats/ordination/_principal_coordinate_analysis.py b/skbio/stats/ordination/_principal_coordinate_analysis.py
index 7320ef9..2313fe4 100644
--- a/skbio/stats/ordination/_principal_coordinate_analysis.py
+++ b/skbio/stats/ordination/_principal_coordinate_analysis.py
@@ -6,18 +6,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from warnings import warn
 
 import pandas as pd
 import numpy as np
 from scipy.linalg import eigh
 
-from skbio._base import OrdinationResults
 from skbio.stats.distance import DistanceMatrix
-from ._utils import e_matrix, f_matrix
 from skbio.util._decorator import experimental
+from ._ordination_results import OrdinationResults
+from ._utils import e_matrix, f_matrix
 
 # - In cogent, after computing eigenvalues/vectors, the imaginary part
 #   is dropped, if any. We know for a fact that the eigenvalues are
@@ -34,7 +32,7 @@ def pcoa(distance_matrix):
 
     Principal Coordinate Analysis (PCoA) is a method similar to PCA
     that works from distance matrices, and so it can be used with
-    ecologically meaningful distances like unifrac for bacteria.
+    ecologically meaningful distances like UniFrac for bacteria.
 
     In ecology, the euclidean distance preserved by Principal
     Component Analysis (PCA) is often not a good choice because it
@@ -47,12 +45,23 @@ def pcoa(distance_matrix):
     similar.).
 
     Parameters
-    ==========
+    ----------
     distance_matrix : DistanceMatrix
         A distance matrix.
 
+    Returns
+    -------
+    OrdinationResults
+        Object that stores the PCoA results, including eigenvalues, the
+        proportion explained by each of them, and transformed sample
+        coordinates.
+
+    See Also
+    --------
+    OrdinationResults
+
     Notes
-    =====
+    -----
     It is sometimes known as metric multidimensional scaling or
     classical scaling.
 
diff --git a/skbio/stats/ordination/_redundancy_analysis.py b/skbio/stats/ordination/_redundancy_analysis.py
index eb3507e..d91184e 100644
--- a/skbio/stats/ordination/_redundancy_analysis.py
+++ b/skbio/stats/ordination/_redundancy_analysis.py
@@ -6,15 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import pandas as pd
 from scipy.linalg import svd, lstsq
 
-from skbio._base import OrdinationResults
-from ._utils import corr, svd_rank, scale
 from skbio.util._decorator import experimental
+from ._ordination_results import OrdinationResults
+from ._utils import corr, svd_rank, scale
 
 
 @experimental(as_of="0.4.0")
@@ -70,14 +68,20 @@ def rda(y, x, scale_Y=False, scaling=1):
         transformed coordinates for feature and samples, biplot
         scores, sample constraints, etc.
 
+    See Also
+    --------
+    ca
+    cca
+    OrdinationResults
+
     Notes
     -----
     The algorithm is based on [1]_, \S 11.1, and is expected to
     give the same results as ``rda(y, x)`` in R's package vegan.
-
-    See Also
-    --------
-    cca
+    The eigenvalues reported in vegan are re-normalized to
+    :math:`\sqrt{\frac{s}{n-1}}`, where `n` is the number of samples
+    and `s` is the original eigenvalue. Here we will only return
+    the original eigenvalues, as recommended in [1]_.
 
     References
     ----------
@@ -203,7 +207,10 @@ def rda(y, x, scale_Y=False, scaling=1):
     # can see that there's an arrow for each of the 4
     # environmental variables (depth, coral, sand, other) even if
     # other = not(coral or sand)
-    biplot_scores = pd.DataFrame(corr(X, u))
+    biplot_scores = corr(X, u)
+    biplot_scores = pd.DataFrame(biplot_scores,
+                                 index=x.columns,
+                                 columns=pc_ids[:biplot_scores.shape[1]])
     # The "Correlations of environmental variables with sample
     # scores" from table 11.4 are quite similar to vegan's biplot
     # scores, but they're computed like this:
diff --git a/skbio/stats/ordination/_utils.py b/skbio/stats/ordination/_utils.py
index 409de58..9b807db 100644
--- a/skbio/stats/ordination/_utils.py
+++ b/skbio/stats/ordination/_utils.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from skbio.util._decorator import experimental
diff --git a/skbio/stats/ordination/tests/__init__.py b/skbio/stats/ordination/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/stats/ordination/tests/__init__.py
+++ b/skbio/stats/ordination/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py b/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py
index fafc3f7..b219446 100644
--- a/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py
+++ b/skbio/stats/ordination/tests/test_canonical_correspondence_analysis.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
@@ -62,7 +59,7 @@ class TestCCAResults1(TestCase):
                            'Sample4', 'Sample5', 'Sample6', 'Sample7',
                            'Sample8', 'Sample9']
         self.env_ids = ['Constraint0', 'Constraint1',
-                        'Constraint2', 'Constraint3']
+                        'Constraint2']
         self.pc_ids = ['CCA1', 'CCA2', 'CCA3', 'CCA4', 'CCA5', 'CCA6', 'CCA7',
                        'CCA8', 'CCA9']
         self.Y = pd.DataFrame(
@@ -70,10 +67,10 @@ class TestCCAResults1(TestCase):
             columns=self.feature_ids,
             index=self.sample_ids)
         self.X = pd.DataFrame(
-            np.loadtxt(get_data_path('example3_X')),
+            np.loadtxt(get_data_path('example3_X'))[:, :-1],
             columns=self.env_ids,
             index=self.sample_ids
-            ).ix[:, :-1]
+            )
 
     def test_scaling1(self):
         scores = cca(self.Y, self.X, scaling=1)
@@ -96,10 +93,12 @@ class TestCCAResults1(TestCase):
                 'example3_sample_constraints_scaling1')),
             index=self.sample_ids,
             columns=self.pc_ids)
-
-        biplot_scores = pd.DataFrame(
-            np.loadtxt(get_data_path(
-                'example3_biplot_scaling1')))
+        mat = np.loadtxt(get_data_path(
+            'example3_biplot_scaling1'))
+        cropped_pcs = self.pc_ids[:mat.shape[1]]
+        biplot_scores = pd.DataFrame(mat,
+                                     index=self.env_ids,
+                                     columns=cropped_pcs)
 
         proportion_explained = pd.Series([0.466911, 0.238327, 0.100548,
                                           0.104937, 0.044805, 0.029747,
@@ -119,7 +118,6 @@ class TestCCAResults1(TestCase):
             eigvals=eigvals)
 
         assert_ordination_results_equal(scores, exp,
-                                        ignore_biplot_scores_labels=True,
                                         decimal=6)
 
     def test_scaling2(self):
@@ -144,9 +142,13 @@ class TestCCAResults1(TestCase):
             index=self.sample_ids,
             columns=self.pc_ids)
 
-        biplot_scores = pd.DataFrame(
-            np.loadtxt(get_data_path(
-                'example3_biplot_scaling2')))
+        mat = np.loadtxt(get_data_path(
+            'example3_biplot_scaling2'))
+
+        cropped_pc_ids = self.pc_ids[:mat.shape[1]]
+        biplot_scores = pd.DataFrame(mat,
+                                     index=self.env_ids,
+                                     columns=cropped_pc_ids)
 
         proportion_explained = pd.Series([0.466911, 0.238327, 0.100548,
                                           0.104937, 0.044805, 0.029747,
@@ -166,7 +168,6 @@ class TestCCAResults1(TestCase):
             eigvals=eigvals)
 
         assert_ordination_results_equal(scores, exp,
-                                        ignore_biplot_scores_labels=True,
                                         decimal=6)
 
 
diff --git a/skbio/stats/ordination/tests/test_correspondence_analysis.py b/skbio/stats/ordination/tests/test_correspondence_analysis.py
index d86f7f8..79b2a88 100644
--- a/skbio/stats/ordination/tests/test_correspondence_analysis.py
+++ b/skbio/stats/ordination/tests/test_correspondence_analysis.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
diff --git a/skbio/tests/test_base.py b/skbio/stats/ordination/tests/test_ordination_results.py
similarity index 67%
copy from skbio/tests/test_base.py
copy to skbio/stats/ordination/tests/test_ordination_results.py
index 66562fb..90856a8 100644
--- a/skbio/tests/test_base.py
+++ b/skbio/stats/ordination/tests/test_ordination_results.py
@@ -6,95 +6,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-from six import binary_type, text_type
-
 import unittest
+
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
 from IPython.core.display import Image, SVG
-from nose.tools import assert_is_instance, assert_true
 
 from skbio import OrdinationResults
-from skbio._base import (SkbioObject, MetadataMixin, PositionalMetadataMixin,
-                         ElasticLines)
-from skbio.util._decorator import overrides
-from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
-                                 PositionalMetadataMixinTests)
-
-
-class TestSkbioObject(unittest.TestCase):
-    def test_no_instantiation(self):
-        class Foo(SkbioObject):
-            pass
-
-        with self.assertRaises(TypeError):
-            Foo()
-
-
-class TestMetadataMixin(unittest.TestCase, ReallyEqualMixin,
-                        MetadataMixinTests):
-    def setUp(self):
-        class ExampleMetadataMixin(MetadataMixin):
-            def __init__(self, metadata=None):
-                MetadataMixin._init_(self, metadata=metadata)
-
-            def __eq__(self, other):
-                return MetadataMixin._eq_(self, other)
-
-            def __ne__(self, other):
-                return MetadataMixin._ne_(self, other)
-
-            def __copy__(self):
-                copy = self.__class__(metadata=None)
-                copy._metadata = MetadataMixin._copy_(self)
-                return copy
-
-            def __deepcopy__(self, memo):
-                copy = self.__class__(metadata=None)
-                copy._metadata = MetadataMixin._deepcopy_(self, memo)
-                return copy
-
-        self._metadata_constructor_ = ExampleMetadataMixin
-
-
-class TestPositionalMetadataMixin(unittest.TestCase, ReallyEqualMixin,
-                                  PositionalMetadataMixinTests):
-    def setUp(self):
-        class ExamplePositionalMetadataMixin(PositionalMetadataMixin):
-            @overrides(PositionalMetadataMixin)
-            def _positional_metadata_axis_len_(self):
-                return self._axis_len
-
-            def __init__(self, axis_len, positional_metadata=None):
-                self._axis_len = axis_len
-
-                PositionalMetadataMixin._init_(
-                    self, positional_metadata=positional_metadata)
-
-            def __eq__(self, other):
-                return PositionalMetadataMixin._eq_(self, other)
-
-            def __ne__(self, other):
-                return PositionalMetadataMixin._ne_(self, other)
-
-            def __copy__(self):
-                copy = self.__class__(self._axis_len, positional_metadata=None)
-                copy._positional_metadata = \
-                    PositionalMetadataMixin._copy_(self)
-                return copy
-
-            def __deepcopy__(self, memo):
-                copy = self.__class__(self._axis_len, positional_metadata=None)
-                copy._positional_metadata = \
-                    PositionalMetadataMixin._deepcopy_(self, memo)
-                return copy
-
-        self._positional_metadata_constructor_ = ExamplePositionalMetadataMixin
 
 
 class TestOrdinationResults(unittest.TestCase):
@@ -177,7 +98,7 @@ class TestOrdinationResults(unittest.TestCase):
                                   exp_legend_exists, exp_xlabel, exp_ylabel,
                                   exp_zlabel):
         # check type
-        assert_is_instance(fig, mpl.figure.Figure)
+        self.assertIsInstance(fig, mpl.figure.Figure)
 
         # check number of subplots
         axes = fig.get_axes()
@@ -195,9 +116,9 @@ class TestOrdinationResults(unittest.TestCase):
         # check if legend is present
         legend = ax.get_legend()
         if exp_legend_exists:
-            assert_true(legend is not None)
+            self.assertTrue(legend is not None)
         else:
-            assert_true(legend is None)
+            self.assertTrue(legend is None)
 
         # check axis labels
         npt.assert_equal(ax.get_xlabel(), exp_xlabel)
@@ -222,7 +143,7 @@ class TestOrdinationResults(unittest.TestCase):
         self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
 
     def test_plot_with_invalid_axis_labels(self):
-        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
+        with self.assertRaisesRegex(ValueError, 'axis_labels.*4'):
             self.min_ord_results.plot(axes=[2, 0, 1],
                                       axis_labels=('a', 'b', 'c', 'd'))
 
@@ -234,27 +155,27 @@ class TestOrdinationResults(unittest.TestCase):
 
     def test_validate_plot_axes_invalid_input(self):
         # not enough dimensions
-        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
+        with self.assertRaisesRegex(ValueError, '2 dimension\(s\)'):
             self.min_ord_results._validate_plot_axes(
                 np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
 
         coord_matrix = self.min_ord_results.samples.values.T
 
         # wrong number of axes
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
+        with self.assertRaisesRegex(ValueError, 'exactly three.*found 0'):
             self.min_ord_results._validate_plot_axes(coord_matrix, [])
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
+        with self.assertRaisesRegex(ValueError, 'exactly three.*found 4'):
             self.min_ord_results._validate_plot_axes(coord_matrix,
                                                      (0, 1, 2, 3))
 
         # duplicate axes
-        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
+        with self.assertRaisesRegex(ValueError, 'must be unique'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
 
         # out of range axes
-        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
+        with self.assertRaisesRegex(ValueError, 'axes\[1\].*3'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
-        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
+        with self.assertRaisesRegex(ValueError, 'axes\[2\].*3'):
             self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
 
     def test_get_plot_point_colors_invalid_input(self):
@@ -269,17 +190,17 @@ class TestOrdinationResults(unittest.TestCase):
                                                         ['B', 'C'], 'jet')
 
         # column not in df
-        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
+        with self.assertRaisesRegex(ValueError, 'missingcol'):
             self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                         ['B', 'C'], 'jet')
 
         # id not in df
-        with six.assertRaisesRegex(self, ValueError, 'numeric'):
+        with self.assertRaisesRegex(ValueError, 'numeric'):
             self.min_ord_results._get_plot_point_colors(
                 self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
 
         # missing data in df
-        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
+        with self.assertRaisesRegex(ValueError, 'nancolumn'):
             self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                         ['B', 'C', 'A'], 'jet')
 
@@ -294,14 +215,14 @@ class TestOrdinationResults(unittest.TestCase):
         obs = self.min_ord_results._get_plot_point_colors(
             self.df, 'numeric', ['B', 'C', 'A'], 'jet')
         npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
+        self.assertTrue(obs[1] is None)
 
         # all ids in df
         exp = [0.0, 42.0, 42.19, -4.2]
         obs = self.min_ord_results._get_plot_point_colors(
             self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
         npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
+        self.assertTrue(obs[1] is None)
 
     def test_get_plot_point_colors_categorical_column(self):
         # subset of the ids in df
@@ -329,14 +250,14 @@ class TestOrdinationResults(unittest.TestCase):
         ax = fig.add_subplot(111, projection='3d')
 
         # we shouldn't have a legend yet
-        assert_true(ax.get_legend() is None)
+        self.assertTrue(ax.get_legend() is None)
 
         self.min_ord_results._plot_categorical_legend(
             ax, {'foo': 'red', 'bar': 'green'})
 
         # make sure we have a legend now
         legend = ax.get_legend()
-        assert_true(legend is not None)
+        self.assertTrue(legend is not None)
 
         # do some light sanity checking to make sure our input labels and
         # colors are present. we're not using nose.tools.assert_items_equal
@@ -349,52 +270,19 @@ class TestOrdinationResults(unittest.TestCase):
 
     def test_repr_png(self):
         obs = self.min_ord_results._repr_png_()
-        assert_is_instance(obs, binary_type)
-        assert_true(len(obs) > 0)
+        self.assertIsInstance(obs, bytes)
+        self.assertTrue(len(obs) > 0)
 
     def test_repr_svg(self):
         obs = self.min_ord_results._repr_svg_()
-        # print_figure(format='svg') can return text or bytes depending on the
-        # version of IPython
-        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
-        assert_true(len(obs) > 0)
+        self.assertIsInstance(obs, str)
+        self.assertTrue(len(obs) > 0)
 
     def test_png(self):
-        assert_is_instance(self.min_ord_results.png, Image)
+        self.assertIsInstance(self.min_ord_results.png, Image)
 
     def test_svg(self):
-        assert_is_instance(self.min_ord_results.svg, SVG)
-
-
-class TestElasticLines(unittest.TestCase):
-    def setUp(self):
-        self.el = ElasticLines()
-
-    def test_empty(self):
-        self.assertEqual(self.el.to_str(), '')
-
-    def test_add_line(self):
-        self.el.add_line('foo')
-        self.assertEqual(self.el.to_str(), 'foo')
-
-    def test_add_lines(self):
-        self.el = ElasticLines()
-        self.el.add_lines(['alice', 'bob', 'carol'])
-        self.assertEqual(self.el.to_str(), 'alice\nbob\ncarol')
-
-    def test_add_separator(self):
-        self.el.add_separator()
-        self.assertEqual(self.el.to_str(), '')
-
-        self.el.add_line('foo')
-        self.assertEqual(self.el.to_str(), '---\nfoo')
-
-        self.el.add_separator()
-        self.el.add_lines(['bar', 'bazzzz'])
-        self.el.add_separator()
-
-        self.assertEqual(self.el.to_str(),
-                         '------\nfoo\n------\nbar\nbazzzz\n------')
+        self.assertIsInstance(self.min_ord_results.svg, SVG)
 
 
 if __name__ == '__main__':
diff --git a/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py b/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
index 585edf2..08e9963 100644
--- a/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
+++ b/skbio/stats/ordination/tests/test_principal_coordinate_analysis.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import pandas as pd
 import numpy as np
 import numpy.testing as npt
diff --git a/skbio/stats/ordination/tests/test_redundancy_analysis.py b/skbio/stats/ordination/tests/test_redundancy_analysis.py
index 10c0d88..8948933 100644
--- a/skbio/stats/ordination/tests/test_redundancy_analysis.py
+++ b/skbio/stats/ordination/tests/test_redundancy_analysis.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-
 import numpy as np
 import numpy.testing as npt
 import pandas as pd
@@ -41,7 +38,7 @@ class TestRDAResults(TestCase):
                            'Site5', 'Site6', 'Site7', 'Site8', 'Site9']
         self.feature_ids = ['Species0', 'Species1', 'Species2', 'Species3',
                             'Species4', 'Species5']
-        self.env_ids = map(str, range(4))
+        self.env_ids = list(map(str, range(4)))
         self.pc_ids = ['RDA1', 'RDA2', 'RDA3', 'RDA4', 'RDA5', 'RDA6', 'RDA7']
 
         self.Y = pd.DataFrame(
@@ -56,9 +53,6 @@ class TestRDAResults(TestCase):
 
         scores = rda(self.Y, self.X, scaling=1)
 
-        biplot_scores = pd.DataFrame(np.loadtxt(
-            get_data_path('example2_biplot_scaling1')))
-
         sample_constraints = pd.DataFrame(np.loadtxt(
             get_data_path('example2_sample_constraints_scaling1')))
 
@@ -80,18 +74,19 @@ class TestRDAResults(TestCase):
                 'example2_sample_constraints_scaling1')),
             index=self.sample_ids,
             columns=self.pc_ids)
+        mat = np.loadtxt(get_data_path(
+            'example2_biplot_scaling1'))
+        cropped_pc_ids = self.pc_ids[:mat.shape[1]]
+        biplot_scores = pd.DataFrame(mat,
+                                     index=self.env_ids,
+                                     columns=cropped_pc_ids)
 
-        biplot_scores = pd.DataFrame(
-            np.loadtxt(get_data_path(
-                'example2_biplot_scaling1')))
-
-        # These are wrong. See issue #1002
         proportion_explained = pd.Series([0.44275783, 0.25614586,
                                           0.15280354, 0.10497021,
                                           0.02873375, 0.00987052,
                                           0.00471828],
                                          index=self.pc_ids)
-        # These are wrong. See issue #1002
+
         eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
                              1.680705, 0.577350, 0.275984],
                             index=self.pc_ids)
@@ -107,15 +102,16 @@ class TestRDAResults(TestCase):
 
         assert_ordination_results_equal(scores, exp,
                                         ignore_directionality=True,
-                                        ignore_biplot_scores_labels=True,
                                         decimal=6)
 
     def test_scaling2(self):
 
         scores = rda(self.Y, self.X, scaling=2)
-
-        biplot_scores = pd.DataFrame(np.loadtxt(
-            get_data_path('example2_biplot_scaling2')))
+        mat = np.loadtxt(get_data_path('example2_biplot_scaling2'))
+        cropped_pc_ids = self.pc_ids[:mat.shape[1]]
+        biplot_scores = pd.DataFrame(mat,
+                                     index=self.env_ids,
+                                     columns=cropped_pc_ids)
 
         sample_constraints = pd.DataFrame(np.loadtxt(
             get_data_path('example2_sample_constraints_scaling2')))
@@ -139,17 +135,19 @@ class TestRDAResults(TestCase):
             index=self.sample_ids,
             columns=self.pc_ids)
 
-        biplot_scores = pd.DataFrame(
-            np.loadtxt(get_data_path(
-                'example2_biplot_scaling2')))
+        mat = np.loadtxt(get_data_path(
+            'example2_biplot_scaling2'))
+        cropped_pc_ids = self.pc_ids[:mat.shape[1]]
+        biplot_scores = pd.DataFrame(mat,
+                                     index=self.env_ids,
+                                     columns=cropped_pc_ids)
 
-        # These are wrong. See issue #1002
         proportion_explained = pd.Series([0.44275783, 0.25614586,
                                           0.15280354, 0.10497021,
                                           0.02873375, 0.00987052,
                                           0.00471828],
                                          index=self.pc_ids)
-        # These are wrong. See issue #1002
+
         eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
                              1.680705, 0.577350, 0.275984],
                             index=self.pc_ids)
@@ -165,7 +163,6 @@ class TestRDAResults(TestCase):
 
         assert_ordination_results_equal(scores, exp,
                                         ignore_directionality=True,
-                                        ignore_biplot_scores_labels=True,
                                         decimal=6)
 
 
diff --git a/skbio/stats/ordination/tests/test_util.py b/skbio/stats/ordination/tests/test_util.py
index 785b861..83634c9 100644
--- a/skbio/stats/ordination/tests/test_util.py
+++ b/skbio/stats/ordination/tests/test_util.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-
 import numpy as np
 import numpy.testing as npt
 
diff --git a/skbio/stats/power.py b/skbio/stats/power.py
index 3fbe84b..fd81ab8 100644
--- a/skbio/stats/power.py
+++ b/skbio/stats/power.py
@@ -140,16 +140,11 @@ we need to be confident that we have not committed a type II error increases.
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import viewitems
-from future.builtins import range
-
 import collections
 import copy
 
 import numpy as np
 import scipy.stats
-import six
 
 from skbio.util._decorator import experimental
 
@@ -728,7 +723,7 @@ def _get_min_size(meta, cat, control_cats, order, strict_match):
 def _check_nans(x, switch=False):
     r"""Returns False if x is a nan and True is x is a string or number
     """
-    if isinstance(x, six.string_types):
+    if isinstance(x, str):
         return True
     elif isinstance(x, (float, int)):
         return not np.isnan(x)
@@ -1012,7 +1007,7 @@ def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
     ctrl_groups = meta.groupby(control_cats).groups
     # Identifies the samples that satisfy the control pairs. Keys are iterated
     # in sorted order so that results don't change with different dictionary
-    # ordering (especially apparent in Python 3).
+    # ordering.
     for g in sorted(ctrl_groups, key=lambda k: str(k)):
         ids = ctrl_groups[g]
         # If strict_match, Skips over data that has nans
@@ -1078,7 +1073,7 @@ def _draw_paired_samples(meta_pairs, index, num_samps):
     subs = []
 
     # Draws the other groups
-    for set_, num_ in viewitems(collections.Counter(set_pos)):
+    for set_, num_ in collections.Counter(set_pos).items():
         r2 = [np.random.choice(col, num_, replace=False) for col in
               meta_pairs[set_]]
         subs.append(r2)
diff --git a/skbio/stats/tests/__init__.py b/skbio/stats/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/stats/tests/__init__.py
+++ b/skbio/stats/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/stats/tests/test_composition.py b/skbio/stats/tests/test_composition.py
index f8e9f3c..b0d9b75 100644
--- a/skbio/stats/tests/test_composition.py
+++ b/skbio/stats/tests/test_composition.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 import numpy as np
 import numpy.testing as npt
@@ -548,12 +546,199 @@ class AncomTests(TestCase):
         assert_data_frame_almost_equal(original_table, test_table)
         # Test to make sure that the input table hasn't be altered
         pdt.assert_series_equal(original_cats, test_cats)
-        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
-                            'reject': np.array([True, True, False, False,
-                                                False, False, False],
-                                               dtype=bool)})
+        exp = pd.DataFrame(
+            {'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 False, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
+
+    def test_ancom_percentiles(self):
+        table = pd.DataFrame([[12, 11],
+                              [9, 11],
+                              [1, 11],
+                              [22, 100],
+                              [20, 53],
+                              [23, 1]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1', 'b2'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        percentiles = [0.0, 25.0, 50.0, 75.0, 100.0]
+        groups = ['a', 'b']
+        tuples = [(p, g) for g in groups for p in percentiles]
+        exp_mi = pd.MultiIndex.from_tuples(tuples,
+                                           names=['Percentile', 'Group'])
+        exp_data = np.array(
+            [[1.0, 11.0], [5.0, 11.0], [9.0, 11.0], [10.5, 11.0], [12.0, 11.0],
+             [20.0, 1.0], [21.0, 27.0], [22.0, 53.0], [22.5, 76.5],
+             [23.0, 100.0]])
+        exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1', 'b2'])
+
+        result = ancom(table, grouping)[1]
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_percentiles_alt_categories(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'c', 'b', 'b', 'c'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        percentiles = [0.0, 25.0, 50.0, 75.0, 100.0]
+        groups = ['a', 'b', 'c']
+        tuples = [(p, g) for g in groups for p in percentiles]
+        exp_mi = pd.MultiIndex.from_tuples(tuples,
+                                           names=['Percentile', 'Group'])
+        exp_data = np.array([[9.0], [9.75], [10.5], [11.25], [12.0],  # a
+                             [20.0], [20.5], [21.0], [21.5], [22.0],  # b
+                             [1.0], [6.5], [12.0], [17.5], [23.0]])   # c
+        exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
+
+        result = ancom(table, grouping, percentiles=percentiles)[1]
         assert_data_frame_almost_equal(result, exp)
 
+    def test_ancom_alt_percentiles(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        percentiles = [42.0, 50.0]
+        groups = ['a', 'b']
+        tuples = [(p, g) for g in groups for p in percentiles]
+        exp_mi = pd.MultiIndex.from_tuples(tuples,
+                                           names=['Percentile', 'Group'])
+        exp_data = np.array([[7.71999999], [9.0],  # a
+                             [21.68], [22.0]])     # b
+        exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
+
+        result = ancom(table, grouping, percentiles=percentiles)[1]
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_percentiles_swapped(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'b', 'a', 'b', 'b'],
+                             index=['s1', 's2', 's4', 's3', 's5', 's6'])
+
+        percentiles = [42.0, 50.0]
+        groups = ['a', 'b']
+        tuples = [(p, g) for g in groups for p in percentiles]
+        exp_mi = pd.MultiIndex.from_tuples(tuples,
+                                           names=['Percentile', 'Group'])
+        exp_data = np.array([[7.71999999], [9.0],  # a
+                             [21.68], [22.0]])     # b
+        exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
+
+        result = ancom(table, grouping, percentiles=percentiles)[1]
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_percentile_order_unimportant(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+        # order of percentiles in unimportant after sorting
+        result1 = ancom(table, grouping, percentiles=[50.0, 42.0])[1]
+        result2 = ancom(table, grouping, percentiles=[42.0, 50.0])[1]
+        assert_data_frame_almost_equal(
+            result1.sort_index(axis=1), result2.sort_index(axis=1))
+
+    def test_ancom_percentiles_iterator(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+
+        percentiles = [42.0, 50.0]
+        groups = ['a', 'b']
+        tuples = [(p, g) for g in groups for p in percentiles]
+        exp_mi = pd.MultiIndex.from_tuples(tuples,
+                                           names=['Percentile', 'Group'])
+        exp_data = np.array([[7.71999999], [9.0],  # a
+                             [21.68], [22.0]])     # b
+        exp = pd.DataFrame(exp_data.T, columns=exp_mi, index=['b1'])
+
+        result = ancom(table, grouping, percentiles=iter(percentiles))[1]
+        assert_data_frame_almost_equal(result, exp)
+
+    def test_ancom_no_percentiles(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+        result = ancom(table, grouping, percentiles=[])[1]
+        assert_data_frame_almost_equal(result, pd.DataFrame())
+
+    def test_ancom_percentile_out_of_range(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+        with self.assertRaises(ValueError):
+            ancom(table, grouping, percentiles=[-1.0])
+        with self.assertRaises(ValueError):
+            ancom(table, grouping, percentiles=[100.1])
+        with self.assertRaises(ValueError):
+            ancom(table, grouping, percentiles=[10.0, 3.0, 101.0, 100])
+
+    def test_ancom_duplicate_percentiles(self):
+        table = pd.DataFrame([[12],
+                              [9],
+                              [1],
+                              [22],
+                              [20],
+                              [23]],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'],
+                             columns=['b1'])
+        grouping = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'],
+                             index=['s1', 's2', 's3', 's4', 's5', 's6'])
+        with self.assertRaises(ValueError):
+            ancom(table, grouping, percentiles=[10.0, 10.0])
+
     def test_ancom_basic_proportions(self):
         # Converts from counts to proportions
         test_table = pd.DataFrame(closure(self.table1))
@@ -567,11 +752,12 @@ class AncomTests(TestCase):
         assert_data_frame_almost_equal(original_table, test_table)
         # Test to make sure that the input table hasn't be altered
         pdt.assert_series_equal(original_cats, test_cats)
-        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
-                            'reject': np.array([True, True, False, False,
-                                                False, False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 False, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_multiple_groups(self):
         test_table = pd.DataFrame(self.table4)
@@ -583,135 +769,152 @@ class AncomTests(TestCase):
         assert_data_frame_almost_equal(original_table, test_table)
         # Test to make sure that the input table hasn't be altered
         pdt.assert_series_equal(original_cats, test_cats)
-        exp = pd.DataFrame({'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
-                            'reject': np.array([True, True, False, False,
-                                                True, False, False, False,
-                                                False], dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 True, False, False, False,
+                                                 False], dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_noncontiguous(self):
         result = ancom(self.table5,
                        self.cats5,
                        multiple_comparisons_correction=None)
-        exp = pd.DataFrame({'W': np.array([6, 2, 2, 2, 2, 6, 2]),
-                            'reject': np.array([True, False, False, False,
-                                                False, True, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([6, 2, 2, 2, 2, 6, 2]),
+             'Reject null hypothesis': np.array([True, False, False, False,
+                                                 False, True, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_unbalanced(self):
         result = ancom(self.table6,
                        self.cats6,
                        multiple_comparisons_correction=None)
-        exp = pd.DataFrame({'W': np.array([5, 3, 3, 2, 2, 5, 2]),
-                            'reject': np.array([True, False, False, False,
-                                                False, True, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 3, 3, 2, 2, 5, 2]),
+             'Reject null hypothesis': np.array([True, False, False, False,
+                                                 False, True, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_letter_categories(self):
         result = ancom(self.table7,
                        self.cats7,
                        multiple_comparisons_correction=None)
-        exp = pd.DataFrame({'W': np.array([5, 3, 3, 2, 2, 5, 2]),
-                            'reject': np.array([True, False, False, False,
-                                                False, True, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 3, 3, 2, 2, 5, 2]),
+             'Reject null hypothesis': np.array([True, False, False, False,
+                                                 False, True, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_multiple_comparisons(self):
         result = ancom(self.table1,
                        self.cats1,
                        multiple_comparisons_correction='holm-bonferroni',
                        significance_test=scipy.stats.mannwhitneyu)
-        exp = pd.DataFrame({'W': np.array([0]*7),
-                            'reject': np.array([False]*7, dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([0]*7),
+             'Reject null hypothesis': np.array([False]*7, dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_alternative_test(self):
         result = ancom(self.table1,
                        self.cats1,
                        multiple_comparisons_correction=None,
                        significance_test=scipy.stats.ttest_ind)
-        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
-                            'reject': np.array([True,  True, False, False,
-                                                False, False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+             'Reject null hypothesis': np.array([True,  True, False, False,
+                                                 False, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_normal_data(self):
         result = ancom(self.table2,
                        self.cats2,
                        multiple_comparisons_correction=None,
                        significance_test=scipy.stats.ttest_ind)
-        exp = pd.DataFrame({'W': np.array([8, 8, 3, 3,
-                                           8, 3, 3, 3, 3]),
-                            'reject': np.array([True, True, False, False,
-                                                True, False, False,
-                                                False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([8, 8, 3, 3, 8, 3, 3, 3, 3]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 True, False, False,
+                                                 False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_basic_counts_swapped(self):
         result = ancom(self.table8, self.cats8)
-        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
-                            'reject': np.array([True, True, False, False,
-                                                False, False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 False, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_no_signal(self):
         result = ancom(self.table3,
                        self.cats3,
                        multiple_comparisons_correction=None)
-        exp = pd.DataFrame({'W': np.array([0]*7),
-                            'reject': np.array([False]*7, dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([0]*7),
+             'Reject null hypothesis': np.array([False]*7, dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_tau(self):
-        exp1 = pd.DataFrame({'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
-                            'reject': np.array([True, False, False, False,
-                                                False, False, False, False,
-                                                False], dtype=bool)})
-        exp2 = pd.DataFrame({'W': np.array([17, 17, 5, 6, 16, 5, 7, 5,
-                                            4, 5, 8, 4, 5, 16, 5, 11, 4, 6]),
-                            'reject': np.array([True, True, False, False,
-                                                True, False, False, False,
-                                                False, False, False, False,
-                                                False, True, False, False,
-                                                False, False],  dtype=bool)})
-        exp3 = pd.DataFrame({'W': np.array([16, 16, 17, 10, 17, 16, 16,
-                                            15, 15, 15, 13, 10, 10, 10,
-                                            9, 9, 9, 9]),
-                            'reject': np.array([True, True, True, False,
-                                                True, True, True, True,
-                                                True, True, True, False,
-                                                False, False, False, False,
-                                                False, False],  dtype=bool)})
-
-        result1 = ancom(self.table4, self.cats4, tau=0.25)
-        result2 = ancom(self.table9, self.cats9, tau=0.02)
-        result3 = ancom(self.table10, self.cats10, tau=0.02)
-
-        assert_data_frame_almost_equal(result1, exp1)
-        assert_data_frame_almost_equal(result2, exp2)
-        assert_data_frame_almost_equal(result3, exp3)
+        exp1 = pd.DataFrame(
+            {'W': np.array([8, 7, 3, 3, 7, 3, 3, 3, 3]),
+             'Reject null hypothesis': np.array([True, False, False, False,
+                                                 False, False, False, False,
+                                                 False], dtype=bool)})
+        exp2 = pd.DataFrame(
+            {'W': np.array([17, 17, 5, 6, 16, 5, 7, 5,
+                            4, 5, 8, 4, 5, 16, 5, 11, 4, 6]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 True, False, False, False,
+                                                 False, False, False, False,
+                                                 False, True, False, False,
+                                                 False, False],  dtype=bool)})
+        exp3 = pd.DataFrame(
+            {'W': np.array([16, 16, 17, 10, 17, 16, 16,
+                            15, 15, 15, 13, 10, 10, 10,
+                            9, 9, 9, 9]),
+             'Reject null hypothesis': np.array([True, True, True, False,
+                                                 True, True, True, True,
+                                                 True, True, True, False,
+                                                 False, False, False, False,
+                                                 False, False], dtype=bool)})
+
+        result1 = ancom(self.table4, self.cats4,
+                        multiple_comparisons_correction=None, tau=0.25)
+        result2 = ancom(self.table9, self.cats9,
+                        multiple_comparisons_correction=None, tau=0.02)
+        result3 = ancom(self.table10, self.cats10,
+                        multiple_comparisons_correction=None, tau=0.02)
+
+        assert_data_frame_almost_equal(result1[0], exp1)
+        assert_data_frame_almost_equal(result2[0], exp2)
+        assert_data_frame_almost_equal(result3[0], exp3)
 
     def test_ancom_theta(self):
         result = ancom(self.table1, self.cats1, theta=0.3)
-        exp = pd.DataFrame({'W': np.array([5, 5, 2, 2, 2, 2, 2]),
-                            'reject': np.array([True, True, False, False,
-                                                False, False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        exp = pd.DataFrame(
+            {'W': np.array([5, 5, 2, 2, 2, 2, 2]),
+             'Reject null hypothesis': np.array([True, True, False, False,
+                                                 False, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_alpha(self):
-        result = ancom(self.table1, self.cats1, alpha=0.5)
-        exp = pd.DataFrame({'W': np.array([6, 6, 4, 5, 5, 4, 2]),
-                            'reject': np.array([True, True, False, True,
-                                                True, False, False],
-                                               dtype=bool)})
-        assert_data_frame_almost_equal(result, exp)
+        result = ancom(self.table1, self.cats1,
+                       multiple_comparisons_correction=None, alpha=0.5)
+        exp = pd.DataFrame(
+            {'W': np.array([6, 6, 4, 5, 5, 4, 2]),
+             'Reject null hypothesis': np.array([True, True, False, True,
+                                                 True, False, False],
+                                                dtype=bool)})
+        assert_data_frame_almost_equal(result[0], exp)
 
     def test_ancom_fail_type(self):
         with self.assertRaises(TypeError):
diff --git a/skbio/stats/tests/test_gradient.py b/skbio/stats/tests/test_gradient.py
index 872d73b..68fdd9d 100644
--- a/skbio/stats/tests/test_gradient.py
+++ b/skbio/stats/tests/test_gradient.py
@@ -6,10 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from six import StringIO
-from future.builtins import zip
-
+import io
 from operator import attrgetter
 from unittest import TestCase, main
 
@@ -257,10 +254,14 @@ class GradientTests(BaseTests):
         w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
                              ['s1', 's2', 's3', 's4',
                               's5', 's6', 's7', 's8']).astype(np.float64)
-        exp = pd.DataFrame.from_dict({'s1': np.array([1]), 's2': np.array([2]),
-                                      's3': np.array([3]), 's4': np.array([4]),
-                                      's5': np.array([5]), 's6': np.array([6]),
-                                      's7': np.array([7]), 's8': np.array([8])
+        exp = pd.DataFrame.from_dict({'s1': np.array([1.0]),
+                                      's2': np.array([2.0]),
+                                      's3': np.array([3.0]),
+                                      's4': np.array([4.0]),
+                                      's5': np.array([5.0]),
+                                      's6': np.array([6.0]),
+                                      's7': np.array([7.0]),
+                                      's8': np.array([8.0])
                                       },
                                      orient='index')
         obs = _weight_by_vector(trajectory, w_vector)
@@ -275,9 +276,11 @@ class GradientTests(BaseTests):
         trajectory.sort_values(by=0, inplace=True)
         w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
                              ['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
-        exp = pd.DataFrame.from_dict({'s2': np.array([2]), 's3': np.array([3]),
-                                      's4': np.array([4]), 's5': np.array([5]),
-                                      's6': np.array([6])}, orient='index')
+        exp = pd.DataFrame.from_dict({'s2': np.array([2.0]),
+                                      's3': np.array([3.0]),
+                                      's4': np.array([4.0]),
+                                      's5': np.array([5.0]),
+                                      's6': np.array([6.0])}, orient='index')
         obs = _weight_by_vector(trajectory, w_vector)
         assert_data_frame_almost_equal(obs.sort_index(), exp.sort_index())
 
@@ -401,8 +404,8 @@ class GroupResultsTests(BaseTests):
         raw_paths = ['gr_wo_msg_raw', 'gr_w_msg_raw']
 
         for gr, out_fp, raw_fp in zip(self.groups, out_paths, raw_paths):
-            obs_out_f = StringIO()
-            obs_raw_f = StringIO()
+            obs_out_f = io.StringIO()
+            obs_raw_f = io.StringIO()
             gr.to_files(obs_out_f, obs_raw_f)
             obs_out = obs_out_f.getvalue()
             obs_raw = obs_raw_f.getvalue()
@@ -425,8 +428,8 @@ class CategoryResultsTests(BaseTests):
         raw_paths = ['cr_no_data_raw', 'cr_data_raw']
 
         for cat, out_fp, raw_fp in zip(self.categories, out_paths, raw_paths):
-            obs_out_f = StringIO()
-            obs_raw_f = StringIO()
+            obs_out_f = io.StringIO()
+            obs_raw_f = io.StringIO()
             cat.to_files(obs_out_f, obs_raw_f)
             obs_out = obs_out_f.getvalue()
             obs_raw = obs_raw_f.getvalue()
@@ -449,8 +452,8 @@ class GradientANOVAResultsTests(BaseTests):
         raw_paths = ['vr_raw']
 
         for vr, out_fp, raw_fp in zip(self.vec_results, out_paths, raw_paths):
-            obs_out_f = StringIO()
-            obs_raw_f = StringIO()
+            obs_out_f = io.StringIO()
+            obs_raw_f = io.StringIO()
             vr.to_files(obs_out_f, obs_raw_f)
             obs_out = obs_out_f.getvalue()
             obs_raw = obs_raw_f.getvalue()
diff --git a/skbio/stats/tests/test_misc.py b/skbio/stats/tests/test_misc.py
index c94239e..21790da 100644
--- a/skbio/stats/tests/test_misc.py
+++ b/skbio/stats/tests/test_misc.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 from unittest import TestCase, main
 
 from skbio.stats._misc import _pprint_strs
diff --git a/skbio/stats/tests/test_power.py b/skbio/stats/tests/test_power.py
index 79576b4..fd1428b 100644
--- a/skbio/stats/tests/test_power.py
+++ b/skbio/stats/tests/test_power.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from unittest import TestCase, main
 
 import numpy as np
diff --git a/skbio/stats/tests/test_subsample.py b/skbio/stats/tests/test_subsample.py
index 0be6d15..509a227 100644
--- a/skbio/stats/tests/test_subsample.py
+++ b/skbio/stats/tests/test_subsample.py
@@ -6,26 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-try:
-    # future >= 0.12
-    from future.backports.test.support import import_fresh_module
-except ImportError:
-    from future.standard_library.test.support import import_fresh_module
-
 import unittest
 import warnings
 
 import numpy as np
 import numpy.testing as npt
 
-from skbio.stats import isubsample
-
-
-cy_subsample = import_fresh_module('skbio.stats._subsample',
-                                   fresh=['skbio.stats.__subsample'])
-py_subsample = import_fresh_module('skbio.stats._subsample',
-                                   blocked=['skbio.stats.__subsample'])
+from skbio.stats import subsample_counts, isubsample
 
 
 def setup():
@@ -38,25 +25,25 @@ def teardown():
     warnings.resetwarnings()
 
 
-class SubsampleCountsTests(object):
+class SubsampleCountsTests(unittest.TestCase):
     def test_subsample_counts_nonrandom(self):
         a = np.array([0, 5, 0])
 
         # Subsample same number of items that are in input (without
         # replacement).
-        npt.assert_equal(self.module.subsample_counts(a, 5), a)
+        npt.assert_equal(subsample_counts(a, 5), a)
 
         # Can only choose from one bin.
         exp = np.array([0, 2, 0])
-        npt.assert_equal(self.module.subsample_counts(a, 2), exp)
+        npt.assert_equal(subsample_counts(a, 2), exp)
         npt.assert_equal(
-            self.module.subsample_counts(a, 2, replace=True), exp)
+            subsample_counts(a, 2, replace=True), exp)
 
         # Subsample zero items.
         a = [3, 0, 1]
         exp = np.array([0, 0, 0])
-        npt.assert_equal(self.module.subsample_counts(a, 0), exp)
-        npt.assert_equal(self.module.subsample_counts(a, 0, replace=True), exp)
+        npt.assert_equal(subsample_counts(a, 0), exp)
+        npt.assert_equal(subsample_counts(a, 0, replace=True), exp)
 
     def test_subsample_counts_without_replacement(self):
         # Selecting 2 counts from the vector 1000 times yields each of the two
@@ -64,11 +51,11 @@ class SubsampleCountsTests(object):
         a = np.array([2, 0, 1])
         actual = set()
         for i in range(1000):
-            obs = self.module.subsample_counts(a, 2)
+            obs = subsample_counts(a, 2)
             actual.add(tuple(obs))
         self.assertEqual(actual, {(1, 0, 1), (2, 0, 0)})
 
-        obs = self.module.subsample_counts(a, 2)
+        obs = subsample_counts(a, 2)
         self.assertTrue(np.array_equal(obs, np.array([1, 0, 1])) or
                         np.array_equal(obs, np.array([2, 0, 0])))
 
@@ -78,7 +65,7 @@ class SubsampleCountsTests(object):
         a = np.array([2, 0, 1])
         actual = set()
         for i in range(1000):
-            obs = self.module.subsample_counts(a, 2, replace=True)
+            obs = subsample_counts(a, 2, replace=True)
             actual.add(tuple(obs))
         self.assertEqual(actual, {(1, 0, 1), (2, 0, 0), (0, 0, 2)})
 
@@ -90,7 +77,7 @@ class SubsampleCountsTests(object):
         a = np.array([2, 0, 1, 2, 1, 8, 6, 0, 3, 3, 5, 0, 0, 0, 5])
         actual = set()
         for i in range(1000):
-            obs = self.module.subsample_counts(a, 35, replace=True)
+            obs = subsample_counts(a, 35, replace=True)
             self.assertEqual(obs.sum(), 35)
             actual.add(tuple(obs))
         self.assertTrue(len(actual) > 10)
@@ -100,7 +87,7 @@ class SubsampleCountsTests(object):
         a = np.array([0, 0, 3, 4, 2, 1])
         actual = set()
         for i in range(1000):
-            obs = self.module.subsample_counts(a, 10, replace=True)
+            obs = subsample_counts(a, 10, replace=True)
             self.assertEqual(obs.sum(), 10)
             actual.add(tuple(obs))
         self.assertTrue(len(actual) > 1)
@@ -108,29 +95,19 @@ class SubsampleCountsTests(object):
     def test_subsample_counts_invalid_input(self):
         # Negative n.
         with self.assertRaises(ValueError):
-            self.module.subsample_counts([1, 2, 3], -1)
+            subsample_counts([1, 2, 3], -1)
 
         # Floats.
         with self.assertRaises(TypeError):
-            self.module.subsample_counts([1, 2.3, 3], 2)
+            subsample_counts([1, 2.3, 3], 2)
 
         # Wrong number of dimensions.
         with self.assertRaises(ValueError):
-            self.module.subsample_counts([[1, 2, 3], [4, 5, 6]], 2)
+            subsample_counts([[1, 2, 3], [4, 5, 6]], 2)
 
         # Input has too few counts.
         with self.assertRaises(ValueError):
-            self.module.subsample_counts([0, 5, 0], 6)
-
-
-class PySubsampleCountsTests(SubsampleCountsTests, unittest.TestCase):
-    module = py_subsample
-
-
- at unittest.skipIf(cy_subsample is None,
-                 "Accelerated subsample module unavailable.")
-class CySubsampleCountsTests(SubsampleCountsTests, unittest.TestCase):
-    module = cy_subsample
+            subsample_counts([0, 5, 0], 6)
 
 
 class ISubsampleTests(unittest.TestCase):
diff --git a/skbio/test.py b/skbio/test.py
index 6bc8059..165195a 100644
--- a/skbio/test.py
+++ b/skbio/test.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import sys
 
 from skbio.util import TestRunner
diff --git a/skbio/tests/__init__.py b/skbio/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/tests/__init__.py
+++ b/skbio/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/tests/test_base.py b/skbio/tests/test_base.py
index 66562fb..924c935 100644
--- a/skbio/tests/test_base.py
+++ b/skbio/tests/test_base.py
@@ -6,25 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-import six
-from six import binary_type, text_type
-
 import unittest
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import numpy as np
-import numpy.testing as npt
-import pandas as pd
-from IPython.core.display import Image, SVG
-from nose.tools import assert_is_instance, assert_true
 
-from skbio import OrdinationResults
-from skbio._base import (SkbioObject, MetadataMixin, PositionalMetadataMixin,
-                         ElasticLines)
-from skbio.util._decorator import overrides
-from skbio.util._testing import (ReallyEqualMixin, MetadataMixinTests,
-                                 PositionalMetadataMixinTests)
+from skbio._base import SkbioObject, ElasticLines
 
 
 class TestSkbioObject(unittest.TestCase):
@@ -36,336 +20,6 @@ class TestSkbioObject(unittest.TestCase):
             Foo()
 
 
-class TestMetadataMixin(unittest.TestCase, ReallyEqualMixin,
-                        MetadataMixinTests):
-    def setUp(self):
-        class ExampleMetadataMixin(MetadataMixin):
-            def __init__(self, metadata=None):
-                MetadataMixin._init_(self, metadata=metadata)
-
-            def __eq__(self, other):
-                return MetadataMixin._eq_(self, other)
-
-            def __ne__(self, other):
-                return MetadataMixin._ne_(self, other)
-
-            def __copy__(self):
-                copy = self.__class__(metadata=None)
-                copy._metadata = MetadataMixin._copy_(self)
-                return copy
-
-            def __deepcopy__(self, memo):
-                copy = self.__class__(metadata=None)
-                copy._metadata = MetadataMixin._deepcopy_(self, memo)
-                return copy
-
-        self._metadata_constructor_ = ExampleMetadataMixin
-
-
-class TestPositionalMetadataMixin(unittest.TestCase, ReallyEqualMixin,
-                                  PositionalMetadataMixinTests):
-    def setUp(self):
-        class ExamplePositionalMetadataMixin(PositionalMetadataMixin):
-            @overrides(PositionalMetadataMixin)
-            def _positional_metadata_axis_len_(self):
-                return self._axis_len
-
-            def __init__(self, axis_len, positional_metadata=None):
-                self._axis_len = axis_len
-
-                PositionalMetadataMixin._init_(
-                    self, positional_metadata=positional_metadata)
-
-            def __eq__(self, other):
-                return PositionalMetadataMixin._eq_(self, other)
-
-            def __ne__(self, other):
-                return PositionalMetadataMixin._ne_(self, other)
-
-            def __copy__(self):
-                copy = self.__class__(self._axis_len, positional_metadata=None)
-                copy._positional_metadata = \
-                    PositionalMetadataMixin._copy_(self)
-                return copy
-
-            def __deepcopy__(self, memo):
-                copy = self.__class__(self._axis_len, positional_metadata=None)
-                copy._positional_metadata = \
-                    PositionalMetadataMixin._deepcopy_(self, memo)
-                return copy
-
-        self._positional_metadata_constructor_ = ExamplePositionalMetadataMixin
-
-
-class TestOrdinationResults(unittest.TestCase):
-    def setUp(self):
-        # Define in-memory CA results to serialize and deserialize.
-        eigvals = pd.Series([0.0961330159181, 0.0409418140138], ['CA1', 'CA2'])
-        features = np.array([[0.408869425742, 0.0695518116298],
-                             [-0.1153860437, -0.299767683538],
-                             [-0.309967102571, 0.187391917117]])
-        samples = np.array([[-0.848956053187, 0.882764759014],
-                            [-0.220458650578, -1.34482000302],
-                            [1.66697179591, 0.470324389808]])
-        features_ids = ['Species1', 'Species2', 'Species3']
-        sample_ids = ['Site1', 'Site2', 'Site3']
-
-        samples_df = pd.DataFrame(samples, index=sample_ids,
-                                  columns=['CA1', 'CA2'])
-        features_df = pd.DataFrame(features, index=features_ids,
-                                   columns=['CA1', 'CA2'])
-
-        self.ordination_results = OrdinationResults(
-            'CA', 'Correspondance Analysis', eigvals=eigvals,
-            samples=samples_df, features=features_df)
-
-        # DataFrame for testing plot method. Has a categorical column with a
-        # mix of numbers and strings. Has a numeric column with a mix of ints,
-        # floats, and strings that can be converted to floats. Has a numeric
-        # column with missing data (np.nan).
-        self.df = pd.DataFrame([['foo', '42', 10],
-                                [22, 0, 8],
-                                [22, -4.2, np.nan],
-                                ['foo', '42.19', 11]],
-                               index=['A', 'B', 'C', 'D'],
-                               columns=['categorical', 'numeric', 'nancolumn'])
-
-        # Minimal ordination results for easier testing of plotting method.
-        # Paired with df above.
-        eigvals = np.array([0.50, 0.25, 0.25])
-        samples = np.array([[0.1, 0.2, 0.3],
-                            [0.2, 0.3, 0.4],
-                            [0.3, 0.4, 0.5],
-                            [0.4, 0.5, 0.6]])
-        samples_df = pd.DataFrame(samples, ['A', 'B', 'C', 'D'],
-                                  ['PC1', 'PC2', 'PC3'])
-
-        self.min_ord_results = OrdinationResults(
-            'PCoA', 'Principal Coordinate Analysis', eigvals, samples_df)
-
-    def test_str(self):
-        exp = ("Ordination results:\n"
-               "\tMethod: Correspondance Analysis (CA)\n"
-               "\tEigvals: 2\n"
-               "\tProportion explained: N/A\n"
-               "\tFeatures: 3x2\n"
-               "\tSamples: 3x2\n"
-               "\tBiplot Scores: N/A\n"
-               "\tSample constraints: N/A\n"
-               "\tFeature IDs: 'Species1', 'Species2', 'Species3'\n"
-               "\tSample IDs: 'Site1', 'Site2', 'Site3'")
-        obs = str(self.ordination_results)
-        self.assertEqual(obs, exp)
-
-        # all optional attributes missing
-        exp = ("Ordination results:\n"
-               "\tMethod: Principal Coordinate Analysis (PCoA)\n"
-               "\tEigvals: 1\n"
-               "\tProportion explained: N/A\n"
-               "\tFeatures: N/A\n"
-               "\tSamples: 2x1\n"
-               "\tBiplot Scores: N/A\n"
-               "\tSample constraints: N/A\n"
-               "\tFeature IDs: N/A\n"
-               "\tSample IDs: 0, 1")
-        samples_df = pd.DataFrame(np.array([[1], [2]]))
-        obs = str(OrdinationResults('PCoA', 'Principal Coordinate Analysis',
-                                    pd.Series(np.array([4.2])), samples_df))
-        self.assertEqual(obs.split('\n'), exp.split('\n'))
-
-    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
-                                  exp_legend_exists, exp_xlabel, exp_ylabel,
-                                  exp_zlabel):
-        # check type
-        assert_is_instance(fig, mpl.figure.Figure)
-
-        # check number of subplots
-        axes = fig.get_axes()
-        npt.assert_equal(len(axes), exp_num_subplots)
-
-        # check title
-        ax = axes[0]
-        npt.assert_equal(ax.get_title(), exp_title)
-
-        # shouldn't have tick labels
-        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
-                           ax.get_zticklabels()):
-            npt.assert_equal(tick_label.get_text(), '')
-
-        # check if legend is present
-        legend = ax.get_legend()
-        if exp_legend_exists:
-            assert_true(legend is not None)
-        else:
-            assert_true(legend is None)
-
-        # check axis labels
-        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
-        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
-        npt.assert_equal(ax.get_zlabel(), exp_zlabel)
-
-    def test_plot_no_metadata(self):
-        fig = self.min_ord_results.plot()
-        self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
-
-    def test_plot_with_numeric_metadata_and_plot_options(self):
-        fig = self.min_ord_results.plot(
-            self.df, 'numeric', axes=(1, 0, 2),
-            axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
-        self.check_basic_figure_sanity(
-            fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
-
-    def test_plot_with_categorical_metadata_and_plot_options(self):
-        fig = self.min_ord_results.plot(
-            self.df, 'categorical', axes=[2, 0, 1], title='a title',
-            cmap='Accent')
-        self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
-
-    def test_plot_with_invalid_axis_labels(self):
-        with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
-            self.min_ord_results.plot(axes=[2, 0, 1],
-                                      axis_labels=('a', 'b', 'c', 'd'))
-
-    def test_validate_plot_axes_valid_input(self):
-        # shouldn't raise an error on valid input. nothing is returned, so
-        # nothing to check here
-        samples = self.min_ord_results.samples.values.T
-        self.min_ord_results._validate_plot_axes(samples, (1, 2, 0))
-
-    def test_validate_plot_axes_invalid_input(self):
-        # not enough dimensions
-        with six.assertRaisesRegex(self, ValueError, '2 dimension\(s\)'):
-            self.min_ord_results._validate_plot_axes(
-                np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
-
-        coord_matrix = self.min_ord_results.samples.values.T
-
-        # wrong number of axes
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, [])
-        with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
-            self.min_ord_results._validate_plot_axes(coord_matrix,
-                                                     (0, 1, 2, 3))
-
-        # duplicate axes
-        with six.assertRaisesRegex(self, ValueError, 'must be unique'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
-
-        # out of range axes
-        with six.assertRaisesRegex(self, ValueError, 'axes\[1\].*3'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
-        with six.assertRaisesRegex(self, ValueError, 'axes\[2\].*3'):
-            self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
-
-    def test_get_plot_point_colors_invalid_input(self):
-        # column provided without df
-        with npt.assert_raises(ValueError):
-            self.min_ord_results._get_plot_point_colors(None, 'numeric',
-                                                        ['B', 'C'], 'jet')
-
-        # df provided without column
-        with npt.assert_raises(ValueError):
-            self.min_ord_results._get_plot_point_colors(self.df, None,
-                                                        ['B', 'C'], 'jet')
-
-        # column not in df
-        with six.assertRaisesRegex(self, ValueError, 'missingcol'):
-            self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
-                                                        ['B', 'C'], 'jet')
-
-        # id not in df
-        with six.assertRaisesRegex(self, ValueError, 'numeric'):
-            self.min_ord_results._get_plot_point_colors(
-                self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
-
-        # missing data in df
-        with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
-            self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
-                                                        ['B', 'C', 'A'], 'jet')
-
-    def test_get_plot_point_colors_no_df_or_column(self):
-        obs = self.min_ord_results._get_plot_point_colors(None, None,
-                                                          ['B', 'C'], 'jet')
-        npt.assert_equal(obs, (None, None))
-
-    def test_get_plot_point_colors_numeric_column(self):
-        # subset of the ids in df
-        exp = [0.0, -4.2, 42.0]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'numeric', ['B', 'C', 'A'], 'jet')
-        npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
-
-        # all ids in df
-        exp = [0.0, 42.0, 42.19, -4.2]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
-        npt.assert_almost_equal(obs[0], exp)
-        assert_true(obs[1] is None)
-
-    def test_get_plot_point_colors_categorical_column(self):
-        # subset of the ids in df
-        exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
-        exp_color_dict = {
-            'foo': [0.5, 0., 0., 1.],
-            22: [0., 0., 0.5, 1.]
-        }
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'categorical', ['B', 'C', 'A'], 'jet')
-        npt.assert_almost_equal(obs[0], exp_colors)
-        npt.assert_equal(obs[1], exp_color_dict)
-
-        # all ids in df
-        exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
-                      [0., 0., 0.5, 1.]]
-        obs = self.min_ord_results._get_plot_point_colors(
-            self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
-        npt.assert_almost_equal(obs[0], exp_colors)
-        # should get same color dict as before
-        npt.assert_equal(obs[1], exp_color_dict)
-
-    def test_plot_categorical_legend(self):
-        fig = plt.figure()
-        ax = fig.add_subplot(111, projection='3d')
-
-        # we shouldn't have a legend yet
-        assert_true(ax.get_legend() is None)
-
-        self.min_ord_results._plot_categorical_legend(
-            ax, {'foo': 'red', 'bar': 'green'})
-
-        # make sure we have a legend now
-        legend = ax.get_legend()
-        assert_true(legend is not None)
-
-        # do some light sanity checking to make sure our input labels and
-        # colors are present. we're not using nose.tools.assert_items_equal
-        # because it isn't available in Python 3.
-        labels = [t.get_text() for t in legend.get_texts()]
-        npt.assert_equal(sorted(labels), ['bar', 'foo'])
-
-        colors = [l.get_color() for l in legend.get_lines()]
-        npt.assert_equal(sorted(colors), ['green', 'red'])
-
-    def test_repr_png(self):
-        obs = self.min_ord_results._repr_png_()
-        assert_is_instance(obs, binary_type)
-        assert_true(len(obs) > 0)
-
-    def test_repr_svg(self):
-        obs = self.min_ord_results._repr_svg_()
-        # print_figure(format='svg') can return text or bytes depending on the
-        # version of IPython
-        assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
-        assert_true(len(obs) > 0)
-
-    def test_png(self):
-        assert_is_instance(self.min_ord_results.png, Image)
-
-    def test_svg(self):
-        assert_is_instance(self.min_ord_results.svg, SVG)
-
-
 class TestElasticLines(unittest.TestCase):
     def setUp(self):
         self.el = ElasticLines()
diff --git a/skbio/tests/test_workflow.py b/skbio/tests/test_workflow.py
index 297486c..3f16583 100644
--- a/skbio/tests/test_workflow.py
+++ b/skbio/tests/test_workflow.py
@@ -6,9 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-from future.builtins import zip
 from collections import defaultdict
 from skbio.workflow import (Exists, NotExecuted, NotNone, Workflow, not_none,
                             requires, method)
diff --git a/skbio/tree/__init__.py b/skbio/tree/__init__.py
index ee6a3c6..72d98ba 100644
--- a/skbio/tree/__init__.py
+++ b/skbio/tree/__init__.py
@@ -181,8 +181,6 @@ pairwise tip-to-tip distances between trees:
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from skbio.util import TestRunner
 
 from ._tree import TreeNode
diff --git a/skbio/tree/_exception.py b/skbio/tree/_exception.py
index fcecbdd..1dfe2ff 100644
--- a/skbio/tree/_exception.py
+++ b/skbio/tree/_exception.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 class TreeError(Exception):
     """General tree error"""
diff --git a/skbio/tree/_majority_rule.py b/skbio/tree/_majority_rule.py
index 4bebb8f..17638a3 100644
--- a/skbio/tree/_majority_rule.py
+++ b/skbio/tree/_majority_rule.py
@@ -6,10 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from collections import defaultdict
-from future.builtins import zip
 
 import numpy as np
 
@@ -116,7 +113,7 @@ def _filter_clades(clade_counts, cutoff_threshold):
     return accepted_clades
 
 
-def _build_trees(clade_counts, edge_lengths, support_attr):
+def _build_trees(clade_counts, edge_lengths, support_attr, tree_node_class):
     """Construct the trees with support
 
     Parameters
@@ -127,10 +124,14 @@ def _build_trees(clade_counts, edge_lengths, support_attr):
         Keyed by the frozenset of the clade and valued by the weighted length
     support_attr : str
         The name of the attribute to hold the support value
+    tree_node_class : type
+        Specifies type of consensus trees that are returned. Either
+        ``TreeNode`` or a type that implements the same interface (most
+        usefully, a subclass of ``TreeNode``).
 
     Returns
     -------
-    list of TreeNode
+    list of tree_node_class instances
         A list of the constructed trees
     """
     nodes = {}
@@ -169,7 +170,7 @@ def _build_trees(clade_counts, edge_lengths, support_attr):
         children = [nodes.pop(c) for c in clade if c in nodes]
         length = edge_lengths[clade]
 
-        node = TreeNode(children=children, length=length, name=name)
+        node = tree_node_class(children=children, length=length, name=name)
         setattr(node, support_attr, clade_counts[clade])
         nodes[clade] = node
 
@@ -179,7 +180,8 @@ def _build_trees(clade_counts, edge_lengths, support_attr):
 
 
 @experimental(as_of="0.4.0")
-def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
+def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support',
+                  tree_node_class=TreeNode):
     r"""Determines consensus trees from a list of rooted trees
 
     Parameters
@@ -190,19 +192,24 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
         If provided, the list must be in index order with `trees`. Each tree
         will receive the corresponding weight. If omitted, all trees will be
         equally weighted.
-    cutoff : float, 0.0 <= cutoff <= 1.0
+    cutoff : float, 0.0 <= cutoff <= 1.0, optional
         Any clade that has <= cutoff support will be dropped. If cutoff is
         < 0.5, then it is possible that ties will result. If so, ties are
         broken arbitrarily depending on list sort order.
-    support_attr : str
+    support_attr : str, optional
         The attribute to be decorated onto the resulting trees that contain the
         consensus support.
+    tree_node_class : type, optional
+        Specifies type of consensus trees that are returned. Either
+        ``TreeNode`` (the default) or a type that implements the same interface
+        (most usefully, a subclass of ``TreeNode``).
 
     Returns
     -------
-    list of TreeNode
-        Multiple trees can be returned in the case of two or more disjoint sets
-        of tips represented on input.
+    list of tree_node_class instances
+        Each tree will be of type `tree_node_class`. Multiple trees can be
+        returned in the case of two or more disjoint sets of tips represented
+        on input.
 
     Notes
     -----
@@ -278,6 +285,7 @@ def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
 
     clade_counts, edge_lengths = _walk_clades(trees, weights)
     clade_counts = _filter_clades(clade_counts, cutoff_threshold)
-    trees = _build_trees(clade_counts, edge_lengths, support_attr)
+    trees = _build_trees(clade_counts, edge_lengths, support_attr,
+                         tree_node_class)
 
     return trees
diff --git a/skbio/tree/_nj.py b/skbio/tree/_nj.py
index dc4b603..11cced3 100644
--- a/skbio/tree/_nj.py
+++ b/skbio/tree/_nj.py
@@ -6,14 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
+import io
 
 import numpy as np
 
 from skbio.stats.distance import DistanceMatrix
 from skbio.tree import TreeNode
 from skbio.util._decorator import experimental
-from skbio.io._fileobject import StringIO
 
 
 @experimental(as_of="0.4.0")
@@ -112,7 +111,7 @@ def nj(dm, disallow_negative_branch_length=True, result_constructor=None):
 
     if result_constructor is None:
         def result_constructor(x):
-            return TreeNode.read(StringIO(x), format='newick')
+            return TreeNode.read(io.StringIO(x), format='newick')
 
     # initialize variables
     node_definition = None
diff --git a/skbio/tree/_tree.py b/skbio/tree/_tree.py
index aa58959..a758a08 100644
--- a/skbio/tree/_tree.py
+++ b/skbio/tree/_tree.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import warnings
 from operator import or_, itemgetter
 from copy import deepcopy
@@ -17,8 +15,6 @@ from collections import defaultdict
 
 import numpy as np
 from scipy.stats import pearsonr
-from future.builtins import zip
-import six
 
 from skbio._base import SkbioObject
 from skbio.stats.distance import DistanceMatrix
@@ -387,6 +383,18 @@ class TreeNode(SkbioObject):
             if len(node.children) == 1:
                 nodes_to_remove.append(node)
 
+        # if a single descendant from the root, the root adopts the child's
+        # properties. we can't "delete" the root as that would be deleting
+        # self.
+        if len(self.children) == 1:
+            node_to_copy = self.children[0]
+            efc = self._exclude_from_copy
+            for key in node_to_copy.__dict__:
+                if key not in efc:
+                    self.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
+            self.remove(node_to_copy)
+            self.children.extend(node_to_copy.children)
+
         # clean up the single children nodes
         for node in nodes_to_remove:
             child = node.children[0]
@@ -795,7 +803,7 @@ class TreeNode(SkbioObject):
         <BLANKLINE>
 
         """
-        if isinstance(node, six.string_types):
+        if isinstance(node, str):
             node = self.find(node)
 
         if not node.children:
@@ -1766,7 +1774,8 @@ class TreeNode(SkbioObject):
         Raises
         ------
         ValueError
-            If no tips could be found in the tree
+            If no tips could be found in the tree, or if not all tips were
+            found.
 
         Examples
         --------
@@ -1883,13 +1892,13 @@ class TreeNode(SkbioObject):
                 if name in cur_node._lookup:
                     cur_node = cur_node._lookup[name]
                 else:
-                    new_node = TreeNode(name=name)
+                    new_node = cls(name=name)
                     new_node._lookup = {}
                     cur_node._lookup[name] = new_node
                     cur_node.append(new_node)
                     cur_node = new_node
 
-            cur_node.append(TreeNode(name=id_))
+            cur_node.append(cls(name=id_))
 
         # scrub the lookups
         for node in root.non_tips(include_self=True):
@@ -1942,13 +1951,13 @@ class TreeNode(SkbioObject):
         tip_width = len(id_list)
         cluster_count = len(linkage_matrix)
         lookup_len = cluster_count + tip_width
-        node_lookup = np.empty(lookup_len, dtype=TreeNode)
+        node_lookup = np.empty(lookup_len, dtype=cls)
 
         for i, name in enumerate(id_list):
-            node_lookup[i] = TreeNode(name=name)
+            node_lookup[i] = cls(name=name)
 
         for i in range(tip_width, lookup_len):
-            node_lookup[i] = TreeNode()
+            node_lookup[i] = cls()
 
         newest_cluster_index = cluster_count + 1
         for link in linkage_matrix:
@@ -2305,12 +2314,20 @@ class TreeNode(SkbioObject):
         if self is other:
             return 0.0
 
-        root = self.root()
-        lca = root.lowest_common_ancestor([self, other])
-        accum = self.accumulate_to_ancestor(lca)
-        accum += other.accumulate_to_ancestor(lca)
+        self_ancestors = [self] + list(self.ancestors())
+        other_ancestors = [other] + list(other.ancestors())
 
-        return accum
+        if self in other_ancestors:
+            return other.accumulate_to_ancestor(self)
+        elif other in self_ancestors:
+            return self.accumulate_to_ancestor(other)
+        else:
+            root = self.root()
+            lca = root.lowest_common_ancestor([self, other])
+            accum = self.accumulate_to_ancestor(lca)
+            accum += other.accumulate_to_ancestor(lca)
+
+            return accum
 
     def _set_max_distance(self):
         """Propagate tip distance information up the tree
@@ -2773,7 +2790,7 @@ class TreeNode(SkbioObject):
                 stack = n.children
                 while len(stack) > 2:
                     ind = stack.pop()
-                    intermediate = TreeNode()
+                    intermediate = self.__class__()
                     intermediate.length = insert_length
                     intermediate.extend(stack)
                     n.append(intermediate)
diff --git a/skbio/tree/tests/__init__.py b/skbio/tree/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/tree/tests/__init__.py
+++ b/skbio/tree/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/tree/tests/test_majority_rule.py b/skbio/tree/tests/test_majority_rule.py
index d466c17..3ce447c 100644
--- a/skbio/tree/tests/test_majority_rule.py
+++ b/skbio/tree/tests/test_majority_rule.py
@@ -6,13 +6,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import io
 from unittest import TestCase, main
 
 import numpy as np
 
-from skbio.io._fileobject import StringIO
 from skbio import TreeNode
 from skbio.tree import majority_rule
 from skbio.tree._majority_rule import (_walk_clades, _filter_clades,
@@ -22,17 +20,26 @@ from skbio.tree._majority_rule import (_walk_clades, _filter_clades,
 class MajorityRuleTests(TestCase):
     def test_majority_rule(self):
         trees = [
-            TreeNode.read(StringIO("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
-            TreeNode.read(StringIO("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
-            TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
-            TreeNode.read(StringIO("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
-
-        exp = TreeNode.read(StringIO("(((E,(G,(F,I),(C,(D,J,H)))),B),A);"))
+            TreeNode.read(
+                io.StringIO("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));")),
+            TreeNode.read(
+                io.StringIO("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));"))]
+
+        exp = TreeNode.read(io.StringIO("(((E,(G,(F,I),(C,(D,J,H)))),B),A);"))
         obs = majority_rule(trees)
         self.assertEqual(exp.compare_subsets(obs[0]), 0.0)
         self.assertEqual(len(obs), 1)
@@ -55,14 +62,42 @@ class MajorityRuleTests(TestCase):
 
     def test_majority_rule_multiple_trees(self):
         trees = [
-            TreeNode.read(StringIO("((a,b),(c,d),(e,f));")),
-            TreeNode.read(StringIO("(a,(c,d),b,(e,f));")),
-            TreeNode.read(StringIO("((c,d),(e,f),b);")),
-            TreeNode.read(StringIO("(a,(c,d),(e,f));"))]
+            TreeNode.read(io.StringIO("((a,b),(c,d),(e,f));")),
+            TreeNode.read(io.StringIO("(a,(c,d),b,(e,f));")),
+            TreeNode.read(io.StringIO("((c,d),(e,f),b);")),
+            TreeNode.read(io.StringIO("(a,(c,d),(e,f));"))]
 
         trees = majority_rule(trees)
         self.assertEqual(len(trees), 4)
 
+        for tree in trees:
+            self.assertIs(type(tree), TreeNode)
+
+        exp = set([
+                  frozenset(['a']),
+                  frozenset(['b']),
+                  frozenset([None, 'c', 'd']),
+                  frozenset([None, 'e', 'f'])])
+
+        obs = set([frozenset([n.name for n in t.traverse()]) for t in trees])
+        self.assertEqual(obs, exp)
+
+    def test_majority_rule_tree_node_class(self):
+        class TreeNodeSubclass(TreeNode):
+            pass
+
+        trees = [
+            TreeNode.read(io.StringIO("((a,b),(c,d),(e,f));")),
+            TreeNode.read(io.StringIO("(a,(c,d),b,(e,f));")),
+            TreeNode.read(io.StringIO("((c,d),(e,f),b);")),
+            TreeNode.read(io.StringIO("(a,(c,d),(e,f));"))]
+
+        trees = majority_rule(trees, tree_node_class=TreeNodeSubclass)
+        self.assertEqual(len(trees), 4)
+
+        for tree in trees:
+            self.assertIs(type(tree), TreeNodeSubclass)
+
         exp = set([
                   frozenset(['a']),
                   frozenset(['b']),
@@ -73,8 +108,8 @@ class MajorityRuleTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_walk_clades(self):
-        trees = [TreeNode.read(StringIO("((A,B),(D,E));")),
-                 TreeNode.read(StringIO("((A,B),(D,(E,X)));"))]
+        trees = [TreeNode.read(io.StringIO("((A,B),(D,E));")),
+                 TreeNode.read(io.StringIO("((A,B),(D,(E,X)));"))]
         exp_clades = [
             (frozenset(['A']), 2.0),
             (frozenset(['B']), 2.0),
@@ -159,7 +194,7 @@ class MajorityRuleTests(TestCase):
         edge_lengths = {frozenset(['A', 'B']): 1,
                         frozenset(['A']): 2,
                         frozenset(['B']): 3}
-        tree = _build_trees(clade_counts, edge_lengths, 'foo')[0]
+        tree = _build_trees(clade_counts, edge_lengths, 'foo', TreeNode)[0]
         self.assertEqual(tree.foo, 6)
         tree_foos = set([c.foo for c in tree.children])
         tree_lens = set([c.length for c in tree.children])
diff --git a/skbio/tree/tests/test_nj.py b/skbio/tree/tests/test_nj.py
index 75bad7c..4cd149c 100644
--- a/skbio/tree/tests/test_nj.py
+++ b/skbio/tree/tests/test_nj.py
@@ -6,11 +6,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import io
 from unittest import TestCase, main
 
-from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix, TreeNode, nj
 from skbio.tree._nj import (
     _compute_q, _compute_collapsed_dm, _lowest_index, _otu_to_new_node,
@@ -32,7 +30,8 @@ class NjTests(TestCase):
         # (d:2.0000,e:1.0000,(c:4.0000,(a:2.0000,b:3.0000):3.0000):2.0000);
         self.expected1_str = ("(d:2.000000, (c:4.000000, (b:3.000000,"
                               " a:2.000000):3.000000):2.000000, e:1.000000);")
-        self.expected1_TreeNode = TreeNode.read(StringIO(self.expected1_str))
+        self.expected1_TreeNode = TreeNode.read(
+                io.StringIO(self.expected1_str))
 
         # this example was pulled from the Phylip manual
         # http://evolution.genetics.washington.edu/phylip/doc/neighbor.html
@@ -50,7 +49,8 @@ class NjTests(TestCase):
                               ", (Gorilla:0.15393, (Chimp:0.15167, Human:0.117"
                               "53):0.03982):0.02696):0.04648):0.42027, Bovine:"
                               "0.91769);")
-        self.expected2_TreeNode = TreeNode.read(StringIO(self.expected2_str))
+        self.expected2_TreeNode = TreeNode.read(
+                io.StringIO(self.expected2_str))
 
         data3 = [[0, 5, 4, 7, 6, 8],
                  [5, 0, 7, 10, 9, 11],
@@ -63,7 +63,8 @@ class NjTests(TestCase):
         self.expected3_str = ("((((0:1.000000,1:4.000000):1.000000,2:2.000000"
                               "):1.250000,5:4.750000):0.750000,3:2.750000,4:2."
                               "250000);")
-        self.expected3_TreeNode = TreeNode.read(StringIO(self.expected3_str))
+        self.expected3_TreeNode = TreeNode.read(
+                io.StringIO(self.expected3_str))
 
         # this dm can yield negative branch lengths
         data4 = [[0,  5,  9,  9,  800],
diff --git a/skbio/tree/tests/test_tree.py b/skbio/tree/tests/test_tree.py
index 4424ddb..f2f65dc 100644
--- a/skbio/tree/tests/test_tree.py
+++ b/skbio/tree/tests/test_tree.py
@@ -6,8 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
+import io
 from unittest import TestCase, main
 from collections import defaultdict
 
@@ -15,18 +14,21 @@ import numpy as np
 import numpy.testing as npt
 from scipy.stats import pearsonr
 
-from skbio.io._fileobject import StringIO
 from skbio import DistanceMatrix, TreeNode
 from skbio.tree import (DuplicateNodeError, NoLengthError,
                         TreeError, MissingNodeError, NoParentError)
 from skbio.util import RepresentationWarning
 
 
+class TreeNodeSubclass(TreeNode):
+    pass
+
+
 class TreeTests(TestCase):
 
     def setUp(self):
         """Prep the self"""
-        self.simple_t = TreeNode.read(StringIO(u"((a,b)i1,(c,d)i2)root;"))
+        self.simple_t = TreeNode.read(io.StringIO("((a,b)i1,(c,d)i2)root;"))
         nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])
         nodes['a'].append(nodes['b'])
         nodes['b'].append(nodes['c'])
@@ -35,7 +37,6 @@ class TreeTests(TestCase):
         nodes['c'].append(nodes['f'])
         nodes['f'].append(nodes['g'])
         nodes['a'].append(nodes['h'])
-        self.TreeNode = nodes
         self.TreeRoot = nodes['a']
 
         def rev_f(items):
@@ -48,9 +49,8 @@ class TreeTests(TestCase):
 
         self.rev_f = rev_f
         self.rotate_f = rotate_f
-        self.complex_tree = TreeNode.read(StringIO(u"(((a,b)int1,(x,y,(w,z)int"
-                                                   "2,(c,d)int3)int4),(e,f)int"
-                                                   "5);"))
+        self.complex_tree = TreeNode.read(io.StringIO(
+            "(((a,b)int1,(x,y,(w,z)int2,(c,d)int3)int4),(e,f)int5);"))
 
     def test_observed_node_counts(self):
         """returns observed nodes counts given vector of otu observation counts
@@ -171,7 +171,7 @@ class TreeTests(TestCase):
 
     def test_append(self):
         """Append a node to a tree"""
-        second_tree = TreeNode.read(StringIO(u"(x,y)z;"))
+        second_tree = TreeNode.read(io.StringIO("(x,y)z;"))
         self.simple_t.append(second_tree)
 
         self.assertEqual(self.simple_t.children[0].name, 'i1')
@@ -184,10 +184,10 @@ class TreeTests(TestCase):
 
     def test_extend(self):
         """Extend a few nodes"""
-        second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
-        third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
-        first_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
-        fourth_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
+        second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
+        third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
+        first_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
+        fourth_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
         self.simple_t.extend([second_tree, third_tree])
 
         first_tree.extend(fourth_tree.children)
@@ -243,8 +243,8 @@ class TreeTests(TestCase):
 
     def test_pop(self):
         """Pop off a node"""
-        second_tree = TreeNode.read(StringIO(u"(x1,y1)z1;"))
-        third_tree = TreeNode.read(StringIO(u"(x2,y2)z2;"))
+        second_tree = TreeNode.read(io.StringIO("(x1,y1)z1;"))
+        third_tree = TreeNode.read(io.StringIO("(x2,y2)z2;"))
         self.simple_t.extend([second_tree, third_tree])
 
         i1 = self.simple_t.pop(0)
@@ -303,6 +303,12 @@ class TreeTests(TestCase):
         self.assertEqual(len(n.children), 2)
         self.assertNotIn(n, self.simple_t.children)
 
+    def test_prune_root_single_desc(self):
+        t = TreeNode.read(["((a,b)c)extra;"])
+        exp = "(a,b)c;\n"
+        t.prune()
+        self.assertEqual(str(t), exp)
+
     def test_prune(self):
         """Collapse single descendent nodes"""
         # check the identity case
@@ -397,7 +403,7 @@ class TreeTests(TestCase):
         self.assertEqual(root._non_tip_cache, {})
 
     def test_invalidate_attr_caches(self):
-        tree = TreeNode.read(StringIO(u"((a,b,(c,d)e)f,(g,h)i)root;"))
+        tree = TreeNode.read(io.StringIO("((a,b,(c,d)e)f,(g,h)i)root;"))
 
         def f(n):
             return [n.name] if n.is_tip() else []
@@ -409,10 +415,10 @@ class TreeTests(TestCase):
 
     def test_create_caches_duplicate_tip_names(self):
         with self.assertRaises(DuplicateNodeError):
-            TreeNode.read(StringIO(u'(a, a);')).create_caches()
+            TreeNode.read(io.StringIO('(a, a);')).create_caches()
 
     def test_find_all(self):
-        t = TreeNode.read(StringIO(u"((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
+        t = TreeNode.read(io.StringIO("((a,b)c,((d,e)c)c,(f,(g,h)c)a)root;"))
         exp = [t.children[0],
                t.children[1].children[0],
                t.children[1],
@@ -438,7 +444,7 @@ class TreeTests(TestCase):
 
     def test_find(self):
         """Find a node in a tree"""
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
         exp = t.children[0]
         obs = t.find('c')
         self.assertEqual(obs, exp)
@@ -452,7 +458,7 @@ class TreeTests(TestCase):
 
     def test_find_cache_bug(self):
         """First implementation did not force the cache to be at the root"""
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f,(g,h)f);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f,(g,h)f);"))
         exp_tip_cache_keys = set(['a', 'b', 'd', 'e', 'g', 'h'])
         exp_non_tip_cache_keys = set(['c', 'f'])
         tip_a = t.children[0].children[0]
@@ -464,8 +470,8 @@ class TreeTests(TestCase):
 
     def test_find_by_id(self):
         """Find a node by id"""
-        t1 = TreeNode.read(StringIO(u"((,),(,,));"))
-        t2 = TreeNode.read(StringIO(u"((,),(,,));"))
+        t1 = TreeNode.read(io.StringIO("((,),(,,));"))
+        t2 = TreeNode.read(io.StringIO("((,),(,,));"))
 
         exp = t1.children[1]
         obs = t1.find_by_id(6)  # right inner node with 3 children
@@ -480,7 +486,7 @@ class TreeTests(TestCase):
 
     def test_find_by_func(self):
         """Find nodes by a function"""
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
 
         def func(x):
             return x.parent == t.find('c')
@@ -526,7 +532,7 @@ class TreeTests(TestCase):
     def test_ascii_art(self):
         """Make some ascii trees"""
         # unlabeled internal node
-        tr = TreeNode.read(StringIO(u"(B:0.2,(C:0.3,D:0.4):0.6)F;"))
+        tr = TreeNode.read(io.StringIO("(B:0.2,(C:0.3,D:0.4):0.6)F;"))
         obs = tr.ascii_art(show_internal=True, compact=False)
         exp = "          /-B\n-F-------|\n         |          /-C\n         "\
               " \\--------|\n                    \\-D"
@@ -540,13 +546,13 @@ class TreeTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_ascii_art_three_children(self):
-        obs = TreeNode.read(StringIO(u'(a,(b,c,d));')).ascii_art()
+        obs = TreeNode.read(io.StringIO('(a,(b,c,d));')).ascii_art()
         self.assertEqual(obs, exp_ascii_art_three_children)
 
     def test_accumulate_to_ancestor(self):
         """Get the distance from a node to its ancestor"""
-        t = TreeNode.read(StringIO(
-            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
+        t = TreeNode.read(io.StringIO(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
         a = t.find('a')
         b = t.find('b')
         exp_to_root = 0.1 + 0.3
@@ -556,10 +562,16 @@ class TreeTests(TestCase):
         with self.assertRaises(NoParentError):
             a.accumulate_to_ancestor(b)
 
+    def test_distance_nontip(self):
+        # example derived from issue #807, credit @wwood
+        tstr = "((A:1.0,B:2.0)'g__genus1':3.0)root;"
+        tree = TreeNode.read(io.StringIO(tstr))
+        self.assertEqual(tree.find('A').distance(tree.find('g__genus1')), 1.0)
+
     def test_distance(self):
         """Get the distance between two nodes"""
-        t = TreeNode.read(StringIO(
-            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
+        t = TreeNode.read(io.StringIO(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e)f:0.5)root;"))
         tips = sorted([n for n in t.tips()], key=lambda x: x.name)
 
         npt.assert_almost_equal(tips[0].distance(tips[0]), 0.0)
@@ -582,7 +594,7 @@ class TreeTests(TestCase):
 
     def test_lowest_common_ancestor(self):
         """TreeNode lowestCommonAncestor should return LCA for set of tips"""
-        t1 = TreeNode.read(StringIO(u"((a,(b,c)d)e,f,(g,h)i)j;"))
+        t1 = TreeNode.read(io.StringIO("((a,(b,c)d)e,f,(g,h)i)j;"))
         t2 = t1.copy()
         t3 = t1.copy()
         t4 = t1.copy()
@@ -618,16 +630,16 @@ class TreeTests(TestCase):
 
     def test_get_max_distance(self):
         """get_max_distance should get max tip distance across tree"""
-        tree = TreeNode.read(StringIO(
-            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
+        tree = TreeNode.read(io.StringIO(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
         dist, nodes = tree.get_max_distance()
         npt.assert_almost_equal(dist, 1.6)
         self.assertEqual(sorted([n.name for n in nodes]), ['b', 'e'])
 
     def test_set_max_distance(self):
         """set_max_distance sets MaxDistTips across tree"""
-        tree = TreeNode.read(StringIO(
-            u"((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
+        tree = TreeNode.read(io.StringIO(
+            "((a:0.1,b:0.2)c:0.3,(d:0.4,e:0.5)f:0.6)root;"))
         tree._set_max_distance()
         tip_a, tip_b = tree.MaxDistTips
         self.assertEqual(tip_a[0] + tip_b[0], 1.6)
@@ -635,7 +647,7 @@ class TreeTests(TestCase):
 
     def test_set_max_distance_tie_bug(self):
         """Corresponds to #1077"""
-        s = StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
+        s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
         t = TreeNode.read(s)
 
         exp = ((3.0, t.find('a')), (9.0, t.find('e')))
@@ -651,7 +663,7 @@ class TreeTests(TestCase):
 
     def test_set_max_distance_inplace_modification_bug(self):
         """Corresponds to #1223"""
-        s = StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
+        s = io.StringIO("((a:1,b:1)c:2,(d:3,e:4)f:5)root;")
         t = TreeNode.read(s)
 
         exp = [((0.0, t.find('a')), (0.0, t.find('a'))),
@@ -668,14 +680,14 @@ class TreeTests(TestCase):
 
     def test_shear(self):
         """Shear the nodes"""
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
         obs = str(t.shear(['G', 'M']))
         exp = '(G:3.0,M:3.7);\n'
         self.assertEqual(obs, exp)
 
     def test_compare_tip_distances(self):
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
-        t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
         obs = t.compare_tip_distances(t2)
         # note: common taxa are H, G, R (only)
         m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
@@ -684,8 +696,8 @@ class TreeTests(TestCase):
         self.assertAlmostEqual(obs, (1 - r) / 2)
 
     def test_compare_tip_distances_sample(self):
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
-        t2 = TreeNode.read(StringIO(u'(((H:1,G:1,O:1):2,R:3):1,X:4);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(io.StringIO('(((H:1,G:1,O:1):2,R:3):1,X:4);'))
         obs = t.compare_tip_distances(t2, sample=3, shuffle_f=sorted)
         # note: common taxa are H, G, R (only)
         m1 = np.array([[0, 2, 6.5], [2, 0, 6.5], [6.5, 6.5, 0]])
@@ -694,29 +706,29 @@ class TreeTests(TestCase):
         self.assertAlmostEqual(obs, (1 - r) / 2)
 
         # 4 common taxa, still picking H, G, R
-        s = u'((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
-        t = TreeNode.read(StringIO(s))
-        s3 = u'(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
-        t3 = TreeNode.read(StringIO(s3))
+        s = '((H:1,G:1):2,(R:0.5,M:0.7,Q:5):3);'
+        t = TreeNode.read(io.StringIO(s))
+        s3 = '(((H:1,G:1,O:1):2,R:3,Q:10):1,X:4);'
+        t3 = TreeNode.read(io.StringIO(s3))
         obs = t.compare_tip_distances(t3, sample=3, shuffle_f=sorted)
 
     def test_compare_tip_distances_no_common_tips(self):
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
-        t2 = TreeNode.read(StringIO(u'(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(io.StringIO('(((Z:1,Y:1,X:1):2,W:3):1,V:4);'))
 
         with self.assertRaises(ValueError):
             t.compare_tip_distances(t2)
 
     def test_compare_tip_distances_single_common_tip(self):
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
-        t2 = TreeNode.read(StringIO(u'(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t2 = TreeNode.read(io.StringIO('(((R:1,Y:1,X:1):2,W:3):1,V:4);'))
 
         self.assertEqual(t.compare_tip_distances(t2), 1)
         self.assertEqual(t2.compare_tip_distances(t), 1)
 
     def test_tip_tip_distances_endpoints(self):
         """Test getting specifc tip distances  with tipToTipDistances"""
-        t = TreeNode.read(StringIO(u'((H:1,G:1):2,(R:0.5,M:0.7):3);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1):2,(R:0.5,M:0.7):3);'))
         nodes = [t.find('H'), t.find('G'), t.find('M')]
         names = ['H', 'G', 'M']
         exp = DistanceMatrix(np.array([[0, 2.0, 6.7],
@@ -730,13 +742,13 @@ class TreeTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_tip_tip_distances_non_tip_endpoints(self):
-        t = TreeNode.read(StringIO(u'((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
+        t = TreeNode.read(io.StringIO('((H:1,G:1)foo:2,(R:0.5,M:0.7):3);'))
         with self.assertRaises(ValueError):
             t.tip_tip_distances(endpoints=['foo'])
 
     def test_tip_tip_distances_no_length(self):
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
-        exp_t = TreeNode.read(StringIO(u"((a:0,b:0)c:0,(d:0,e:0)f:0);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
+        exp_t = TreeNode.read(io.StringIO("((a:0,b:0)c:0,(d:0,e:0)f:0);"))
         exp_t_dm = exp_t.tip_tip_distances()
 
         t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
@@ -746,8 +758,8 @@ class TreeTests(TestCase):
             self.assertIs(node.length, None)
 
     def test_tip_tip_distances_missing_length(self):
-        t = TreeNode.read(StringIO(u"((a,b:6)c:4,(d,e:0)f);"))
-        exp_t = TreeNode.read(StringIO(u"((a:0,b:6)c:4,(d:0,e:0)f:0);"))
+        t = TreeNode.read(io.StringIO("((a,b:6)c:4,(d,e:0)f);"))
+        exp_t = TreeNode.read(io.StringIO("((a:0,b:6)c:4,(d:0,e:0)f:0);"))
         exp_t_dm = exp_t.tip_tip_distances()
 
         t_dm = npt.assert_warns(RepresentationWarning, t.tip_tip_distances)
@@ -755,7 +767,7 @@ class TreeTests(TestCase):
 
     def test_neighbors(self):
         """Get neighbors of a node"""
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
         exp = t.children
         obs = t.neighbors()
         self.assertEqual(obs, exp)
@@ -774,7 +786,7 @@ class TreeTests(TestCase):
 
     def test_has_children(self):
         """Test if has children"""
-        t = TreeNode.read(StringIO(u"((a,b)c,(d,e)f);"))
+        t = TreeNode.read(io.StringIO("((a,b)c,(d,e)f);"))
         self.assertTrue(t.has_children())
         self.assertTrue(t.children[0].has_children())
         self.assertTrue(t.children[1].has_children())
@@ -818,8 +830,8 @@ class TreeTests(TestCase):
         self.assertEqual(obs, exp)
 
     def test_bifurcate(self):
-        t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
-        t2 = TreeNode.read(StringIO(u'((a,b,c));'))
+        t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
+        t2 = TreeNode.read(io.StringIO('((a,b,c));'))
         t3 = t2.copy()
 
         t1.bifurcate()
@@ -830,9 +842,21 @@ class TreeTests(TestCase):
         self.assertEqual(str(t2), '((c,(a,b)));\n')
         self.assertEqual(str(t3), '((c,(a,b):0));\n')
 
+    def test_bifurcate_with_subclass(self):
+        tree = TreeNodeSubclass()
+        tree.append(TreeNodeSubclass())
+        tree.append(TreeNodeSubclass())
+        tree.append(TreeNodeSubclass())
+        tree.append(TreeNodeSubclass())
+
+        tree.bifurcate()
+
+        for node in tree.traverse():
+            self.assertIs(type(node), TreeNodeSubclass)
+
     def test_index_tree_single_node(self):
         """index_tree handles single node tree"""
-        t1 = TreeNode.read(StringIO(u'root;'))
+        t1 = TreeNode.read(io.StringIO('root;'))
         id_index, child_index = t1.index_tree()
         self.assertEqual(id_index[0], t1)
         npt.assert_equal(child_index, np.array([[]]))
@@ -840,9 +864,9 @@ class TreeTests(TestCase):
     def test_index_tree(self):
         """index_tree should produce correct index and node map"""
         # test for first tree: contains singleton outgroup
-        t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
-        t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
-        t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
+        t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
+        t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
+        t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
 
         id_1, child_1 = t1.index_tree()
         nodes_1 = [n.id for n in t1.traverse(self_before=False,
@@ -870,7 +894,7 @@ class TreeTests(TestCase):
 
     def test_root_at(self):
         """Form a new root"""
-        t = TreeNode.read(StringIO(u"(((a,b)c,(d,e)f)g,h)i;"))
+        t = TreeNode.read(io.StringIO("(((a,b)c,(d,e)f)g,h)i;"))
         with self.assertRaises(TreeError):
             t.root_at(t.find('h'))
 
@@ -894,16 +918,16 @@ class TreeTests(TestCase):
 
     def test_root_at_midpoint_no_lengths(self):
         # should get same tree back (a copy)
-        nwk = u'(a,b)c;\n'
-        t = TreeNode.read(StringIO(nwk))
+        nwk = '(a,b)c;\n'
+        t = TreeNode.read(io.StringIO(nwk))
         obs = t.root_at_midpoint()
         self.assertEqual(str(obs), nwk)
 
     def test_root_at_midpoint_tie(self):
-        nwk = u"(((a:1,b:1)c:2,(d:3,e:4)f:5),g:1)root;"
-        t = TreeNode.read(StringIO(nwk))
-        exp = u"((d:3,e:4)f:2,((a:1,b:1)c:2,(g:1)):3)root;"
-        texp = TreeNode.read(StringIO(exp))
+        nwk = "(((a:1,b:1)c:2,(d:3,e:4)f:5),g:1)root;"
+        t = TreeNode.read(io.StringIO(nwk))
+        exp = "((d:3,e:4)f:2,((a:1,b:1)c:2,(g:1)):3)root;"
+        texp = TreeNode.read(io.StringIO(exp))
 
         obs = t.root_at_midpoint()
 
@@ -913,9 +937,9 @@ class TreeTests(TestCase):
 
     def test_compare_subsets(self):
         """compare_subsets should return the fraction of shared subsets"""
-        t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
-        t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
-        t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
+        t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
+        t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
+        t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
 
         result = t.compare_subsets(t)
         self.assertEqual(result, 0)
@@ -940,9 +964,9 @@ class TreeTests(TestCase):
 
     def test_compare_rfd(self):
         """compare_rfd should return the Robinson Foulds distance"""
-        t = TreeNode.read(StringIO(u'((H,G),(R,M));'))
-        t2 = TreeNode.read(StringIO(u'(((H,G),R),M);'))
-        t4 = TreeNode.read(StringIO(u'(((H,G),(O,R)),X);'))
+        t = TreeNode.read(io.StringIO('((H,G),(R,M));'))
+        t2 = TreeNode.read(io.StringIO('(((H,G),R),M);'))
+        t4 = TreeNode.read(io.StringIO('(((H,G),(O,R)),X);'))
 
         obs = t.compare_rfd(t2)
         exp = 2.0
@@ -959,9 +983,9 @@ class TreeTests(TestCase):
 
     def test_assign_ids(self):
         """Assign IDs to the tree"""
-        t1 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
-        t2 = TreeNode.read(StringIO(u"(((a,b),c),(e,f),(g));"))
-        t3 = TreeNode.read(StringIO(u"((g),(e,f),(c,(a,b)));"))
+        t1 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
+        t2 = TreeNode.read(io.StringIO("(((a,b),c),(e,f),(g));"))
+        t3 = TreeNode.read(io.StringIO("((g),(e,f),(c,(a,b)));"))
         t1_copy = t1.copy()
 
         t1.assign_ids()
@@ -978,9 +1002,9 @@ class TreeTests(TestCase):
 
     def test_assign_ids_index_tree(self):
         """assign_ids and index_tree should assign the same IDs"""
-        t1 = TreeNode.read(StringIO(u'(((a,b),c),(d,e));'))
-        t2 = TreeNode.read(StringIO(u'(((a,b),(c,d)),(e,f));'))
-        t3 = TreeNode.read(StringIO(u'(((a,b,c),(d)),(e,f));'))
+        t1 = TreeNode.read(io.StringIO('(((a,b),c),(d,e));'))
+        t2 = TreeNode.read(io.StringIO('(((a,b),(c,d)),(e,f));'))
+        t3 = TreeNode.read(io.StringIO('(((a,b,c),(d)),(e,f));'))
         t1_copy = t1.copy()
         t2_copy = t2.copy()
         t3_copy = t3.copy()
@@ -1001,7 +1025,7 @@ class TreeTests(TestCase):
 
     def test_unrooted_deepcopy(self):
         """Do an unrooted_copy"""
-        t = TreeNode.read(StringIO(u"((a,(b,c)d)e,(f,g)h)i;"))
+        t = TreeNode.read(io.StringIO("((a,(b,c)d)e,(f,g)h)i;"))
         exp = "(b,c,(a,((f,g)h)e)d)root;\n"
         obs = t.find('d').unrooted_deepcopy()
         self.assertEqual(str(obs), exp)
@@ -1013,8 +1037,8 @@ class TreeTests(TestCase):
 
     def test_descending_branch_length(self):
         """Calculate descending branch_length"""
-        tr = TreeNode.read(StringIO(u"(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4"
-                                    ",(H:.4,I:.5)J:1.3)K;"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         sdbl = tr.descending_branch_length(['A', 'E'])
         npt.assert_almost_equal(tdbl, 8.9)
@@ -1023,36 +1047,36 @@ class TreeTests(TestCase):
                           ['A', 'DNE'])
         self.assertRaises(ValueError, tr.descending_branch_length, ['A', 'C'])
 
-        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
-                                    ":.4,I:.5)J:1.3)K;"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         npt.assert_almost_equal(tdbl, 8.8)
 
-        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
-                                    ",I:.5)J:1.3)K;"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length()
         npt.assert_almost_equal(tdbl, 7.9)
 
-        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4"
-                                    ",I:.5)J:1.3)K;"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A,B:1.2)C:.6,(D:.9,E:.6)F)G:2.4,(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['A', 'D', 'E'])
         npt.assert_almost_equal(tdbl, 2.1)
 
-        tr = TreeNode.read(StringIO(u"(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H"
-                                    ":.4,I:.5)J:1.3)K;"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,(H:.4,I:.5)J:1.3)K;"))
         tdbl = tr.descending_branch_length(['I', 'D', 'E'])
         npt.assert_almost_equal(tdbl, 6.6)
 
         # test with a situation where we have unnamed internal nodes
-        tr = TreeNode.read(StringIO(u"(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I"
-                                    ":.5)J:1.3);"))
+        tr = TreeNode.read(io.StringIO(
+            "(((A,B:1.2):.6,(D:.9,E:.6)F):2.4,(H:.4,I:.5)J:1.3);"))
         tdbl = tr.descending_branch_length()
         npt.assert_almost_equal(tdbl, 7.9)
 
     def test_to_array(self):
         """Convert a tree to arrays"""
-        t = TreeNode.read(StringIO(
-            u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
+        t = TreeNode.read(io.StringIO(
+            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
         id_index, child_index = t.index_tree()
         arrayed = t.to_array()
 
@@ -1073,8 +1097,8 @@ class TreeTests(TestCase):
         npt.assert_equal(obs, exp)
 
     def test_to_array_attrs(self):
-        t = TreeNode.read(StringIO(
-            u'(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
+        t = TreeNode.read(io.StringIO(
+            '(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7,(e:8,f:9)z:10);'))
         id_index, child_index = t.index_tree()
         arrayed = t.to_array(attrs=[('name', object)])
 
@@ -1095,7 +1119,7 @@ class TreeTests(TestCase):
             t.to_array(attrs=[('name', object), ('brofist', int)])
 
     def test_to_array_nan_length_value(self):
-        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root;"))
+        t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root;"))
         indexed = t.to_array(nan_length_value=None)
         npt.assert_equal(indexed['length'],
                          np.array([1, 2, 3, np.nan], dtype=float))
@@ -1106,12 +1130,12 @@ class TreeTests(TestCase):
         npt.assert_equal(indexed['length'],
                          np.array([1, 2, 3, 42.0], dtype=float))
 
-        t = TreeNode.read(StringIO(u"((a:1, b:2)c:3)root:4;"))
+        t = TreeNode.read(io.StringIO("((a:1, b:2)c:3)root:4;"))
         indexed = t.to_array(nan_length_value=42.0)
         npt.assert_equal(indexed['length'],
                          np.array([1, 2, 3, 4], dtype=float))
 
-        t = TreeNode.read(StringIO(u"((a:1, b:2)c)root;"))
+        t = TreeNode.read(io.StringIO("((a:1, b:2)c)root;"))
         indexed = t.to_array(nan_length_value=42.0)
         npt.assert_equal(indexed['length'],
                          np.array([1, 2, 42.0, 42.0], dtype=float))
@@ -1122,13 +1146,20 @@ class TreeTests(TestCase):
                           '3': ['h', 'i', 'j', 'k', 'l', 'm', 'n'],
                           '4': ['h', 'i', 'j', 'k', 'l', 'm', 'q'],
                           '5': ['h', 'i', 'j', 'k', 'l', 'm', 'n']}
-        exp = TreeNode.read(StringIO(u"((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
-                                     "(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
+        exp = TreeNode.read(io.StringIO(
+            "((((((((1)g)f)e)d,((((2)y)x)))c)b)a,"
+            "(((((((3,5)n,(4)q)m)l)k)j)i)h);"))
 
         root = TreeNode.from_taxonomy(input_lineages.items())
 
+        self.assertIs(type(root), TreeNode)
+
         self.assertEqual(root.compare_subsets(exp), 0.0)
 
+        root = TreeNodeSubclass.from_taxonomy(input_lineages.items())
+
+        self.assertIs(type(root), TreeNodeSubclass)
+
     def test_to_taxonomy(self):
         input_lineages = {'1': ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
                           '2': ['a', 'b', 'c', None, None, 'x', 'y'],
@@ -1169,10 +1200,17 @@ class TreeTests(TestCase):
                               [4.0, 11.0, 34.0,  7.0]])
 
         tree = TreeNode.from_linkage_matrix(linkage, id_list)
+
+        self.assertIs(type(tree), TreeNode)
+
         self.assertEqual("(E:17.0,(C:14.5,((A:4.0,D:4.0):4.25,(G:6.25,(B:0.5,"
                          "F:0.5):5.75):2.0):6.25):2.5);\n",
                          str(tree))
 
+        tree = TreeNodeSubclass.from_linkage_matrix(linkage, id_list)
+
+        self.assertIs(type(tree), TreeNodeSubclass)
+
     def test_shuffle_invalid_iter(self):
         shuffler = self.simple_t.shuffle(n=-1)
         with self.assertRaises(ValueError):
diff --git a/skbio/util/__init__.py b/skbio/util/__init__.py
index 92b5bdd..67c5c70 100644
--- a/skbio/util/__init__.py
+++ b/skbio/util/__init__.py
@@ -56,8 +56,6 @@ Warnings
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 from ._warning import EfficiencyWarning, RepresentationWarning, SkbioWarning
 from ._misc import (cardinal_to_ordinal, create_dir, find_duplicates,
                     is_casava_v180_or_later, remove_files, safe_md5)
diff --git a/skbio/util/_decorator.py b/skbio/util/_decorator.py
index 7208159..8d08274 100644
--- a/skbio/util/_decorator.py
+++ b/skbio/util/_decorator.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 import warnings
 import textwrap
 
@@ -16,7 +15,7 @@ from ._exception import OverrideError
 from ._warning import DeprecationWarning as SkbioDeprecationWarning
 
 
-class _state_decorator(object):
+class _state_decorator:
     """ Base class for decorators of all public functionality.
     """
 
diff --git a/skbio/util/_exception.py b/skbio/util/_exception.py
index cc251e5..80ba7c1 100644
--- a/skbio/util/_exception.py
+++ b/skbio/util/_exception.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 class TestingUtilError(Exception):
     """Raised when an exception is needed to test exception handling."""
diff --git a/skbio/util/_misc.py b/skbio/util/_misc.py
index 48a87b0..3fae35a 100644
--- a/skbio/util/_misc.py
+++ b/skbio/util/_misc.py
@@ -6,15 +6,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import hashlib
 from os import remove, makedirs
 from os.path import exists, isdir
 from functools import partial
 from types import FunctionType
 import inspect
-from ._decorator import experimental
+from ._decorator import experimental, deprecated
 
 
 def resolve_key(obj, key):
@@ -29,7 +27,7 @@ def resolve_key(obj, key):
 
 
 def make_sentinel(name):
-    return type(name, (object, ), {
+    return type(name, (), {
         '__repr__': lambda s: name,
         '__str__': lambda s: name,
         '__class__': None
@@ -37,22 +35,9 @@ def make_sentinel(name):
 
 
 def find_sentinels(function, sentinel):
-    keys = []
-    if hasattr(inspect, 'signature'):
-        params = inspect.signature(function).parameters
-        for name, param in params.items():
-            if param.default is sentinel:
-                keys.append(name)
-    else:  # Py2
-        function_spec = inspect.getargspec(function)
-        if function_spec.defaults is not None:
-            # Concept from http://stackoverflow.com/a/12627202/579416
-            keywords_start = -len(function_spec.defaults)
-            for key, default in zip(function_spec.args[keywords_start:],
-                                    function_spec.defaults):
-                if default is sentinel:
-                    keys.append(key)
-    return keys
+    params = inspect.signature(function).parameters
+    return [name for name, param in params.items()
+            if param.default is sentinel]
 
 
 class MiniRegistry(dict):
@@ -78,15 +63,10 @@ class MiniRegistry(dict):
 
     def interpolate(self, obj, name):
         """Inject the formatted listing in the second blank line of `name`."""
-        # Py2/3 compatible way of calling getattr(obj, name).__func__
-        f = getattr(obj, name).__get__(None, type(None))
+        f = getattr(obj, name)
+        f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
+                          argdefs=f.__defaults__, closure=f.__closure__)
 
-        if hasattr(f, 'func_code'):
-            f2 = FunctionType(f.func_code, f.func_globals, name=f.func_name,
-                              argdefs=f.func_defaults, closure=f.func_closure)
-        else:
-            f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
-                              argdefs=f.__defaults__, closure=f.__closure__)
         # Conveniently the original docstring is on f2, not the new ones if
         # inheritence is happening. I have no idea why.
         t = f2.__doc__.split("\n\n")
@@ -153,7 +133,10 @@ def cardinal_to_ordinal(n):
     return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
 
 
- at experimental(as_of="0.4.0")
+ at deprecated(as_of='0.5.0', until='0.5.1',
+            reason='This functionality will be moved to the '
+                   'fastq sniffer, where it will be more useful as it will '
+                   'determine the variant of a fastq file.')
 def is_casava_v180_or_later(header_line):
     """Check if the header looks like it is Illumina software post-casava v1.8
 
@@ -226,7 +209,9 @@ def safe_md5(open_file, block_size=2 ** 20):
     return md5
 
 
- at experimental(as_of="0.4.0")
+ at deprecated(as_of="0.5.0", until="0.5.1",
+            reason="Deprecated in favor of solutions present in Python "
+                   "standard library.")
 def remove_files(list_of_filepaths, error_on_missing=True):
     """Remove list of filepaths, optionally raising an error if any are missing
 
@@ -268,7 +253,9 @@ def remove_files(list_of_filepaths, error_on_missing=True):
                       '\t'.join(missing))
 
 
- at experimental(as_of="0.4.0")
+ at deprecated(as_of="0.5.0", until="0.5.1",
+            reason="Deprecated in favor of solutions present in Python "
+                   "standard library.")
 def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
     """Create a directory safely and fail meaningfully
 
diff --git a/skbio/util/_testing.py b/skbio/util/_testing.py
index 92c111c..04465dd 100644
--- a/skbio/util/_testing.py
+++ b/skbio/util/_testing.py
@@ -6,18 +6,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.utils import PY3
-
-import copy
 import os
 import inspect
 import warnings
 
-import six
-import pandas as pd
 import nose
-
 import numpy as np
 import numpy.testing as npt
 import pandas.util.testing as pdt
@@ -26,7 +19,7 @@ from skbio.util import SkbioWarning
 from ._decorator import experimental
 
 
-class ReallyEqualMixin(object):
+class ReallyEqualMixin:
     """Use this for testing __eq__/__ne__.
 
     Taken and modified from the following public domain code:
@@ -44,13 +37,6 @@ class ReallyEqualMixin(object):
         self.assertFalse(a != b)
         self.assertFalse(b != a)
 
-        # We do not support cmp/__cmp__ because they do not exist in Python 3.
-        # However, we still test this to catch potential bugs where the
-        # object's parent class defines a __cmp__.
-        if not PY3:
-            self.assertEqual(0, cmp(a, b))  # noqa
-            self.assertEqual(0, cmp(b, a))  # noqa
-
     def assertReallyNotEqual(self, a, b):
         # assertNotEqual first, because it will have a good message if the
         # assertion fails.
@@ -61,836 +47,6 @@ class ReallyEqualMixin(object):
         self.assertTrue(a != b)
         self.assertTrue(b != a)
 
-        # We do not support cmp/__cmp__ because they do not exist in Python 3.
-        # However, we still test this to catch potential bugs where the
-        # object's parent class defines a __cmp__.
-        if not PY3:
-            self.assertNotEqual(0, cmp(a, b))  # noqa
-            self.assertNotEqual(0, cmp(b, a))  # noqa
-
-
-class MetadataMixinTests(object):
-    def test_constructor_invalid_type(self):
-        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
-                self._metadata_constructor_(metadata=md)
-
-    def test_constructor_no_metadata(self):
-        for md in None, {}:
-            obj = self._metadata_constructor_(metadata=md)
-
-            self.assertFalse(obj.has_metadata())
-            self.assertEqual(obj.metadata, {})
-
-    def test_constructor_with_metadata(self):
-        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
-        self.assertEqual(obj.metadata, {'foo': 'bar'})
-
-        obj = self._metadata_constructor_(
-                metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
-        self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})
-
-    def test_constructor_handles_missing_metadata_efficiently(self):
-        self.assertIsNone(self._metadata_constructor_()._metadata)
-        self.assertIsNone(self._metadata_constructor_(metadata=None)._metadata)
-
-    def test_constructor_makes_shallow_copy_of_metadata(self):
-        md = {'foo': 'bar', 42: []}
-        obj = self._metadata_constructor_(metadata=md)
-
-        self.assertEqual(obj.metadata, md)
-        self.assertIsNot(obj.metadata, md)
-
-        md['foo'] = 'baz'
-        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
-
-        md[42].append(True)
-        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
-
-    def test_eq(self):
-        self.assertReallyEqual(
-                self._metadata_constructor_(metadata={'foo': 42}),
-                self._metadata_constructor_(metadata={'foo': 42}))
-
-        self.assertReallyEqual(
-                self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
-                self._metadata_constructor_(metadata={'foo': 42, 123: {}}))
-
-    def test_eq_missing_metadata(self):
-        self.assertReallyEqual(self._metadata_constructor_(),
-                               self._metadata_constructor_())
-        self.assertReallyEqual(self._metadata_constructor_(),
-                               self._metadata_constructor_(metadata={}))
-        self.assertReallyEqual(self._metadata_constructor_(metadata={}),
-                               self._metadata_constructor_(metadata={}))
-
-    def test_eq_handles_missing_metadata_efficiently(self):
-        obj1 = self._metadata_constructor_()
-        obj2 = self._metadata_constructor_()
-        self.assertReallyEqual(obj1, obj2)
-
-        self.assertIsNone(obj1._metadata)
-        self.assertIsNone(obj2._metadata)
-
-    def test_ne(self):
-        # Both have metadata.
-        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
-        obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
-        self.assertReallyNotEqual(obj1, obj2)
-
-        # One has metadata.
-        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
-        obj2 = self._metadata_constructor_()
-        self.assertReallyNotEqual(obj1, obj2)
-
-    def test_copy_metadata_none(self):
-        obj = self._metadata_constructor_()
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNone(obj._metadata)
-        self.assertIsNone(obj_copy._metadata)
-
-    def test_copy_metadata_empty(self):
-        obj = self._metadata_constructor_(metadata={})
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertEqual(obj._metadata, {})
-        self.assertIsNone(obj_copy._metadata)
-
-    def test_copy_with_metadata(self):
-        obj = self._metadata_constructor_(metadata={'foo': [1]})
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNot(obj._metadata, obj_copy._metadata)
-        self.assertIs(obj._metadata['foo'], obj_copy._metadata['foo'])
-
-        obj_copy.metadata['foo'].append(2)
-        obj_copy.metadata['foo2'] = 42
-
-        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
-        self.assertEqual(obj.metadata, {'foo': [1, 2]})
-
-    def test_deepcopy_metadata_none(self):
-        obj = self._metadata_constructor_()
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNone(obj._metadata)
-        self.assertIsNone(obj_copy._metadata)
-
-    def test_deepcopy_metadata_empty(self):
-        obj = self._metadata_constructor_(metadata={})
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertEqual(obj._metadata, {})
-        self.assertIsNone(obj_copy._metadata)
-
-    def test_deepcopy_with_metadata(self):
-        obj = self._metadata_constructor_(metadata={'foo': [1]})
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNot(obj._metadata, obj_copy._metadata)
-        self.assertIsNot(obj._metadata['foo'], obj_copy._metadata['foo'])
-
-        obj_copy.metadata['foo'].append(2)
-        obj_copy.metadata['foo2'] = 42
-
-        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
-        self.assertEqual(obj.metadata, {'foo': [1]})
-
-    def test_deepcopy_memo_is_respected(self):
-        # Basic test to ensure deepcopy's memo is passed through to recursive
-        # deepcopy calls.
-        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
-        memo = {}
-        copy.deepcopy(obj, memo)
-        self.assertGreater(len(memo), 2)
-
-    def test_metadata_getter(self):
-        obj = self._metadata_constructor_(
-                metadata={42: 'foo', ('hello', 'world'): 43})
-
-        self.assertIsInstance(obj.metadata, dict)
-        self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
-
-        obj.metadata[42] = 'bar'
-        self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})
-
-    def test_metadata_getter_no_metadata(self):
-        obj = self._metadata_constructor_()
-
-        self.assertIsNone(obj._metadata)
-        self.assertIsInstance(obj.metadata, dict)
-        self.assertEqual(obj.metadata, {})
-        self.assertIsNotNone(obj._metadata)
-
-    def test_metadata_setter(self):
-        obj = self._metadata_constructor_()
-
-        self.assertFalse(obj.has_metadata())
-
-        obj.metadata = {'hello': 'world'}
-        self.assertTrue(obj.has_metadata())
-        self.assertEqual(obj.metadata, {'hello': 'world'})
-
-        obj.metadata = {}
-        self.assertFalse(obj.has_metadata())
-        self.assertEqual(obj.metadata, {})
-
-    def test_metadata_setter_makes_shallow_copy(self):
-        obj = self._metadata_constructor_()
-
-        md = {'foo': 'bar', 42: []}
-        obj.metadata = md
-
-        self.assertEqual(obj.metadata, md)
-        self.assertIsNot(obj.metadata, md)
-
-        md['foo'] = 'baz'
-        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
-
-        md[42].append(True)
-        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})
-
-    def test_metadata_setter_invalid_type(self):
-        obj = self._metadata_constructor_(metadata={123: 456})
-
-        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
-                   pd.DataFrame()):
-            with six.assertRaisesRegex(self, TypeError,
-                                       'metadata must be a dict'):
-                obj.metadata = md
-            self.assertEqual(obj.metadata, {123: 456})
-
-    def test_metadata_deleter(self):
-        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
-
-        self.assertEqual(obj.metadata, {'foo': 'bar'})
-
-        del obj.metadata
-        self.assertIsNone(obj._metadata)
-        self.assertFalse(obj.has_metadata())
-
-        # Delete again.
-        del obj.metadata
-        self.assertIsNone(obj._metadata)
-        self.assertFalse(obj.has_metadata())
-
-        obj = self._metadata_constructor_()
-
-        self.assertIsNone(obj._metadata)
-        self.assertFalse(obj.has_metadata())
-        del obj.metadata
-        self.assertIsNone(obj._metadata)
-        self.assertFalse(obj.has_metadata())
-
-    def test_has_metadata(self):
-        obj = self._metadata_constructor_()
-
-        self.assertFalse(obj.has_metadata())
-        # Handles metadata efficiently.
-        self.assertIsNone(obj._metadata)
-
-        self.assertFalse(
-                self._metadata_constructor_(metadata={}).has_metadata())
-
-        self.assertTrue(
-                self._metadata_constructor_(metadata={'': ''}).has_metadata())
-        self.assertTrue(
-                self._metadata_constructor_(
-                        metadata={'foo': 42}).has_metadata())
-
-
-class PositionalMetadataMixinTests(object):
-    def test_constructor_invalid_positional_metadata_type(self):
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Invalid positional metadata. Must be '
-                                   'consumable by `pd.DataFrame` constructor. '
-                                   'Original pandas error message: '):
-            self._positional_metadata_constructor_(0, positional_metadata=2)
-
-    def test_constructor_positional_metadata_len_mismatch(self):
-        # Zero elements.
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
-            self._positional_metadata_constructor_(4, positional_metadata=[])
-
-        # Not enough elements.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=[2, 3, 4])
-
-        # Too many elements.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=[2, 3, 4, 5, 6])
-
-        # Series not enough rows.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=pd.Series(range(3)))
-
-        # Series too many rows.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=pd.Series(range(5)))
-
-        # DataFrame not enough rows.
-        with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=pd.DataFrame({'quality': range(3)}))
-
-        # DataFrame too many rows.
-        with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
-            self._positional_metadata_constructor_(
-                4, positional_metadata=pd.DataFrame({'quality': range(5)}))
-
-    def test_constructor_no_positional_metadata(self):
-        # Length zero with missing/empty positional metadata.
-        for empty in None, {}, pd.DataFrame():
-            obj = self._positional_metadata_constructor_(
-                0, positional_metadata=empty)
-
-            self.assertFalse(obj.has_positional_metadata())
-            assert_data_frame_almost_equal(obj.positional_metadata,
-                                           pd.DataFrame(index=np.arange(0)))
-
-        # Nonzero length with missing positional metadata.
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata=None)
-
-        self.assertFalse(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-    def test_constructor_with_positional_metadata_len_zero(self):
-        for data in [], (), np.array([]):
-            obj = self._positional_metadata_constructor_(
-                0, positional_metadata={'foo': data})
-
-            self.assertTrue(obj.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(0)))
-
-    def test_constructor_with_positional_metadata_len_one(self):
-        for data in [2], (2, ), np.array([2]):
-            obj = self._positional_metadata_constructor_(
-                1, positional_metadata={'foo': data})
-
-            self.assertTrue(obj.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(1)))
-
-    def test_constructor_with_positional_metadata_len_greater_than_one(self):
-        for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
-                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
-                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
-            obj = self._positional_metadata_constructor_(
-                9, positional_metadata={'foo': data})
-
-            self.assertTrue(obj.has_positional_metadata())
-            assert_data_frame_almost_equal(
-                obj.positional_metadata,
-                pd.DataFrame({'foo': data}, index=np.arange(9)))
-
-    def test_constructor_with_positional_metadata_multiple_columns(self):
-        obj = self._positional_metadata_constructor_(
-            5, positional_metadata={'foo': np.arange(5),
-                                    'bar': np.arange(5)[::-1]})
-
-        self.assertTrue(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
-
-    def test_constructor_with_positional_metadata_custom_index(self):
-        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
-                          index=['a', 'b', 'c', 'd', 'e'])
-        obj = self._positional_metadata_constructor_(
-            5, positional_metadata=df)
-
-        self.assertTrue(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': np.arange(5),
-                          'bar': np.arange(5)[::-1]}, index=np.arange(5)))
-
-    def test_constructor_handles_missing_positional_metadata_efficiently(self):
-        obj = self._positional_metadata_constructor_(4)
-        self.assertIsNone(obj._positional_metadata)
-
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata=None)
-        self.assertIsNone(obj._positional_metadata)
-
-    def test_constructor_makes_shallow_copy_of_positional_metadata(self):
-        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                          index=['a', 'b', 'c'])
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata=df)
-
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-        self.assertIsNot(obj.positional_metadata, df)
-
-        # Original df is not mutated.
-        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                               index=['a', 'b', 'c'])
-        assert_data_frame_almost_equal(df, orig_df)
-
-        # Change values of column (using same dtype).
-        df['foo'] = [42, 42, 42]
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-
-        # Change single value of underlying data.
-        df.values[0][0] = 10
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-
-        # Mutate list (not a deep copy).
-        df['bar'][0].append(42)
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
-                         index=np.arange(3)))
-
-    def test_eq_basic(self):
-        obj1 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-        obj2 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-        self.assertReallyEqual(obj1, obj2)
-
-    def test_eq_from_different_source(self):
-        obj1 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': np.array([1, 2, 3])})
-        obj2 = self._positional_metadata_constructor_(
-            3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
-                                                index=['foo', 'bar', 'baz']))
-        self.assertReallyEqual(obj1, obj2)
-
-    def test_eq_missing_positional_metadata(self):
-        for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
-            obj = self._positional_metadata_constructor_(
-                0, positional_metadata=empty)
-
-            self.assertReallyEqual(
-                obj,
-                self._positional_metadata_constructor_(0))
-            self.assertReallyEqual(
-                obj,
-                self._positional_metadata_constructor_(
-                    0, positional_metadata=empty))
-
-        for empty in None, pd.DataFrame(index=['a', 'b']):
-            obj = self._positional_metadata_constructor_(
-                2, positional_metadata=empty)
-
-            self.assertReallyEqual(
-                obj,
-                self._positional_metadata_constructor_(2))
-            self.assertReallyEqual(
-                obj,
-                self._positional_metadata_constructor_(
-                    2, positional_metadata=empty))
-
-    def test_eq_handles_missing_positional_metadata_efficiently(self):
-        obj1 = self._positional_metadata_constructor_(1)
-        obj2 = self._positional_metadata_constructor_(1)
-        self.assertReallyEqual(obj1, obj2)
-
-        self.assertIsNone(obj1._positional_metadata)
-        self.assertIsNone(obj2._positional_metadata)
-
-    def test_ne_len_zero(self):
-        # Both have positional metadata.
-        obj1 = self._positional_metadata_constructor_(
-            0, positional_metadata={'foo': []})
-        obj2 = self._positional_metadata_constructor_(
-            0, positional_metadata={'foo': [], 'bar': []})
-        self.assertReallyNotEqual(obj1, obj2)
-
-        # One has positional metadata.
-        obj1 = self._positional_metadata_constructor_(
-            0, positional_metadata={'foo': []})
-        obj2 = self._positional_metadata_constructor_(0)
-        self.assertReallyNotEqual(obj1, obj2)
-
-    def test_ne_len_greater_than_zero(self):
-        # Both have positional metadata.
-        obj1 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-        obj2 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 2]})
-        self.assertReallyNotEqual(obj1, obj2)
-
-        # One has positional metadata.
-        obj1 = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-        obj2 = self._positional_metadata_constructor_(3)
-        self.assertReallyNotEqual(obj1, obj2)
-
-    def test_copy_positional_metadata_none(self):
-        obj = self._positional_metadata_constructor_(3)
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNone(obj._positional_metadata)
-        self.assertIsNone(obj_copy._positional_metadata)
-
-    def test_copy_positional_metadata_empty(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata=pd.DataFrame(index=range(3)))
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        assert_data_frame_almost_equal(obj._positional_metadata,
-                                       pd.DataFrame(index=range(3)))
-        self.assertIsNone(obj_copy._positional_metadata)
-
-    def test_copy_with_positional_metadata(self):
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata={'bar': [[], [], [], []],
-                                    'baz': [42, 42, 42, 42]})
-        obj_copy = copy.copy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNot(obj._positional_metadata,
-                         obj_copy._positional_metadata)
-        self.assertIsNot(obj._positional_metadata.values,
-                         obj_copy._positional_metadata.values)
-        self.assertIs(obj._positional_metadata.loc[0, 'bar'],
-                      obj_copy._positional_metadata.loc[0, 'bar'])
-
-        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
-        obj_copy.positional_metadata.loc[0, 'baz'] = 43
-
-        assert_data_frame_almost_equal(
-            obj_copy.positional_metadata,
-            pd.DataFrame({'bar': [[1], [], [], []],
-                          'baz': [43, 42, 42, 42]}))
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'bar': [[1], [], [], []],
-                          'baz': [42, 42, 42, 42]}))
-
-    def test_deepcopy_positional_metadata_none(self):
-        obj = self._positional_metadata_constructor_(3)
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNone(obj._positional_metadata)
-        self.assertIsNone(obj_copy._positional_metadata)
-
-    def test_deepcopy_positional_metadata_empty(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata=pd.DataFrame(index=range(3)))
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        assert_data_frame_almost_equal(obj._positional_metadata,
-                                       pd.DataFrame(index=range(3)))
-        self.assertIsNone(obj_copy._positional_metadata)
-
-    def test_deepcopy_with_positional_metadata(self):
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata={'bar': [[], [], [], []],
-                                    'baz': [42, 42, 42, 42]})
-        obj_copy = copy.deepcopy(obj)
-
-        self.assertEqual(obj, obj_copy)
-        self.assertIsNot(obj, obj_copy)
-
-        self.assertIsNot(obj._positional_metadata,
-                         obj_copy._positional_metadata)
-        self.assertIsNot(obj._positional_metadata.values,
-                         obj_copy._positional_metadata.values)
-        self.assertIsNot(obj._positional_metadata.loc[0, 'bar'],
-                         obj_copy._positional_metadata.loc[0, 'bar'])
-
-        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
-        obj_copy.positional_metadata.loc[0, 'baz'] = 43
-
-        assert_data_frame_almost_equal(
-            obj_copy.positional_metadata,
-            pd.DataFrame({'bar': [[1], [], [], []],
-                          'baz': [43, 42, 42, 42]}))
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'bar': [[], [], [], []],
-                          'baz': [42, 42, 42, 42]}))
-
-    def test_deepcopy_memo_is_respected(self):
-        # Basic test to ensure deepcopy's memo is passed through to recursive
-        # deepcopy calls.
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-        memo = {}
-        copy.deepcopy(obj, memo)
-        self.assertGreater(len(memo), 2)
-
-    def test_positional_metadata_getter(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [22, 22, 0]})
-
-        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [22, 22, 0]}))
-
-        # Update existing column.
-        obj.positional_metadata['foo'] = [42, 42, 43]
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [42, 42, 43]}))
-
-        # Add new column.
-        obj.positional_metadata['foo2'] = [True, False, True]
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [42, 42, 43],
-                          'foo2': [True, False, True]}))
-
-    def test_positional_metadata_getter_no_positional_metadata(self):
-        obj = self._positional_metadata_constructor_(4)
-
-        self.assertIsNone(obj._positional_metadata)
-        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame(index=np.arange(4)))
-        self.assertIsNotNone(obj._positional_metadata)
-
-    def test_positional_metadata_getter_set_column_series(self):
-        length = 8
-        obj = self._positional_metadata_constructor_(
-            length, positional_metadata={'foo': range(length)})
-
-        obj.positional_metadata['bar'] = pd.Series(range(length-3))
-        # pandas.Series will be padded with NaN if too short.
-        npt.assert_equal(obj.positional_metadata['bar'],
-                         np.array(list(range(length-3)) + [np.nan]*3))
-
-        obj.positional_metadata['baz'] = pd.Series(range(length+3))
-        # pandas.Series will be truncated if too long.
-        npt.assert_equal(obj.positional_metadata['baz'],
-                         np.array(range(length)))
-
-    def test_positional_metadata_getter_set_column_array(self):
-        length = 8
-        obj = self._positional_metadata_constructor_(
-            length, positional_metadata={'foo': range(length)})
-
-        # array-like objects will fail if wrong size.
-        for array_like in (np.array(range(length-1)), range(length-1),
-                           np.array(range(length+1)), range(length+1)):
-            with six.assertRaisesRegex(self, ValueError,
-                                       "Length of values does not match "
-                                       "length of index"):
-                obj.positional_metadata['bar'] = array_like
-
-    def test_positional_metadata_setter_pandas_consumable(self):
-        obj = self._positional_metadata_constructor_(3)
-
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj.positional_metadata = {'foo': [3, 2, 1]}
-        self.assertTrue(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [3, 2, 1]}))
-
-        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
-        self.assertFalse(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-    def test_positional_metadata_setter_data_frame(self):
-        obj = self._positional_metadata_constructor_(3)
-
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
-                                               index=['a', 'b', 'c'])
-        self.assertTrue(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [3, 2, 1]}))
-
-        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
-        self.assertFalse(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(3)))
-
-    def test_positional_metadata_setter_none(self):
-        obj = self._positional_metadata_constructor_(
-            0, positional_metadata={'foo': []})
-
-        self.assertTrue(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': []}))
-
-        # `None` behavior differs from constructor.
-        obj.positional_metadata = None
-
-        self.assertFalse(obj.has_positional_metadata())
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame(index=np.arange(0)))
-
-    def test_positional_metadata_setter_makes_shallow_copy(self):
-        obj = self._positional_metadata_constructor_(3)
-
-        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                          index=['a', 'b', 'c'])
-        obj.positional_metadata = df
-
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-        self.assertIsNot(obj.positional_metadata, df)
-
-        # Original df is not mutated.
-        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                               index=['a', 'b', 'c'])
-        assert_data_frame_almost_equal(df, orig_df)
-
-        # Change values of column (using same dtype).
-        df['foo'] = [42, 42, 42]
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-
-        # Change single value of underlying data.
-        df.values[0][0] = 10
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
-                         index=np.arange(3)))
-
-        # Mutate list (not a deep copy).
-        df['bar'][0].append(42)
-        assert_data_frame_almost_equal(
-            obj.positional_metadata,
-            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
-                         index=np.arange(3)))
-
-    def test_positional_metadata_setter_invalid_type(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 42]})
-
-        with six.assertRaisesRegex(self, TypeError,
-                                   'Invalid positional metadata. Must be '
-                                   'consumable by `pd.DataFrame` constructor. '
-                                   'Original pandas error message: '):
-            obj.positional_metadata = 2
-
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-    def test_positional_metadata_setter_len_mismatch(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 42]})
-
-        # `None` behavior differs from constructor.
-        with six.assertRaisesRegex(self, ValueError, '\(0\).*\(3\)'):
-            obj.positional_metadata = None
-
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-        with six.assertRaisesRegex(self, ValueError, '\(4\).*\(3\)'):
-            obj.positional_metadata = [1, 2, 3, 4]
-
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 42]}))
-
-    def test_positional_metadata_deleter(self):
-        obj = self._positional_metadata_constructor_(
-            3, positional_metadata={'foo': [1, 2, 3]})
-
-        assert_data_frame_almost_equal(obj.positional_metadata,
-                                       pd.DataFrame({'foo': [1, 2, 3]}))
-
-        del obj.positional_metadata
-        self.assertIsNone(obj._positional_metadata)
-        self.assertFalse(obj.has_positional_metadata())
-
-        # Delete again.
-        del obj.positional_metadata
-        self.assertIsNone(obj._positional_metadata)
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(3)
-
-        self.assertIsNone(obj._positional_metadata)
-        self.assertFalse(obj.has_positional_metadata())
-        del obj.positional_metadata
-        self.assertIsNone(obj._positional_metadata)
-        self.assertFalse(obj.has_positional_metadata())
-
-    def test_has_positional_metadata(self):
-        obj = self._positional_metadata_constructor_(4)
-        self.assertFalse(obj.has_positional_metadata())
-        self.assertIsNone(obj._positional_metadata)
-
-        obj = self._positional_metadata_constructor_(0, positional_metadata={})
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata=pd.DataFrame(index=np.arange(4)))
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata=pd.DataFrame(index=['a', 'b', 'c', 'd']))
-        self.assertFalse(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(
-            0, positional_metadata={'foo': []})
-        self.assertTrue(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(
-            4, positional_metadata={'foo': [1, 2, 3, 4]})
-        self.assertTrue(obj.has_positional_metadata())
-
-        obj = self._positional_metadata_constructor_(
-            2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})
-        self.assertTrue(obj.has_positional_metadata())
-
 
 @nose.tools.nottest
 class SuppressSkbioWarnings(nose.plugins.Plugin):
@@ -906,7 +62,7 @@ class SuppressSkbioWarnings(nose.plugins.Plugin):
 
 
 @nose.tools.nottest
-class TestRunner(object):
+class TestRunner:
     """Simple wrapper class around nosetests functionality.
 
     Parameters
@@ -945,9 +101,8 @@ class TestRunner(object):
         """
         # NOTE: it doesn't seem to matter what the first element of the argv
         # list is, there just needs to be something there.
-        argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING']
-        if PY3:
-            argv.extend(['--with-doctest', '--doctest-tests'])
+        argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING', '--with-doctest',
+                '--doctest-tests']
         if verbose:
             argv.append('-v')
         return nose.core.run(argv=argv, defaultTest=self._test_dir,
@@ -995,7 +150,6 @@ def get_data_path(fn, subfolder='data'):
 @experimental(as_of="0.4.0")
 def assert_ordination_results_equal(left, right, ignore_method_names=False,
                                     ignore_axis_labels=False,
-                                    ignore_biplot_scores_labels=False,
                                     ignore_directionality=False,
                                     decimal=7):
     """Assert that ordination results objects are equal.
@@ -1011,8 +165,6 @@ def assert_ordination_results_equal(left, right, ignore_method_names=False,
         Ignore differences in `short_method_name` and `long_method_name`.
     ignore_axis_labels : bool, optional
         Ignore differences in axis labels (i.e., column labels).
-    ignore_biplot_scores_labels : bool, optional
-        Ignore differences in `biplot_scores` row and column labels.
     ignore_directionality : bool, optional
         Ignore differences in directionality (i.e., differences in signs) for
         attributes `samples`, `features` and `biplot_scores`.
@@ -1040,8 +192,7 @@ def assert_ordination_results_equal(left, right, ignore_method_names=False,
                         decimal=decimal)
 
     _assert_frame_equal(left.biplot_scores, right.biplot_scores,
-                        ignore_biplot_scores_labels,
-                        ignore_biplot_scores_labels,
+                        ignore_columns=ignore_axis_labels,
                         ignore_directionality=ignore_directionality,
                         decimal=decimal)
 
diff --git a/skbio/util/_warning.py b/skbio/util/_warning.py
index 2de0295..6f4cb60 100644
--- a/skbio/util/_warning.py
+++ b/skbio/util/_warning.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 
 class SkbioWarning(Warning):
     """Used to filter our warnings from warnings given by 3rd parties"""
diff --git a/skbio/util/tests/__init__.py b/skbio/util/tests/__init__.py
index 3fe3dc6..0bf0c55 100644
--- a/skbio/util/tests/__init__.py
+++ b/skbio/util/tests/__init__.py
@@ -5,5 +5,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
diff --git a/skbio/util/tests/test_decorator.py b/skbio/util/tests/test_decorator.py
index b968263..128494c 100644
--- a/skbio/util/tests/test_decorator.py
+++ b/skbio/util/tests/test_decorator.py
@@ -6,7 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
 import unittest
 import inspect
 import warnings
@@ -20,7 +19,7 @@ from skbio.util._exception import OverrideError
 
 class TestClassOnlyMethod(unittest.TestCase):
     def test_works_on_class(self):
-        class A(object):
+        class A:
             @classonlymethod
             def example(cls):
                 return cls
@@ -28,7 +27,7 @@ class TestClassOnlyMethod(unittest.TestCase):
         self.assertEqual(A.example(), A)
 
     def test_fails_on_instance(self):
-        class A(object):
+        class A:
             @classonlymethod
             def example(cls):
                 pass
@@ -40,7 +39,7 @@ class TestClassOnlyMethod(unittest.TestCase):
         self.assertIn('instance', str(e.exception))
 
     def test_matches_classmethod(self):
-        class A(object):
+        class A:
             pass
 
         def example(cls, thing):
@@ -61,7 +60,7 @@ class TestClassOnlyMethod(unittest.TestCase):
     def test_passes_args_kwargs(self):
         self.ran_test = False
 
-        class A(object):
+        class A:
             @classonlymethod
             def example(cls, arg1, arg2, kwarg1=None, kwarg2=None,
                         default=5):
@@ -78,7 +77,7 @@ class TestClassOnlyMethod(unittest.TestCase):
 
 class TestOverrides(unittest.TestCase):
     def test_raises_when_missing(self):
-        class A(object):
+        class A:
             pass
 
         with self.assertRaises(OverrideError):
@@ -88,7 +87,7 @@ class TestOverrides(unittest.TestCase):
                     pass
 
     def test_doc_inherited(self):
-        class A(object):
+        class A:
             def test(self):
                 """Docstring"""
                 pass
@@ -101,7 +100,7 @@ class TestOverrides(unittest.TestCase):
         self.assertEqual(B.test.__doc__, "Docstring")
 
     def test_doc_not_inherited(self):
-        class A(object):
+        class A:
             def test(self):
                 """Docstring"""
                 pass
@@ -117,7 +116,7 @@ class TestOverrides(unittest.TestCase):
 
 class TestClassProperty(unittest.TestCase):
     def test_getter_only(self):
-        class Foo(object):
+        class Foo:
             _foo = 42
 
             @classproperty
@@ -225,11 +224,15 @@ class TestStable(TestStabilityState):
 
     def test_function_signature(self):
         f = self._get_f('0.1.0')
-        # Py2: update this to use inspect.signature when we drop Python 2
-        # inspect.getargspec is deprecated and won't exist in 3.6
-        expected = inspect.ArgSpec(
-            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
-        self.assertEqual(inspect.getargspec(f), expected)
+
+        parameters = [
+            inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD),
+            inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                              default=42)
+        ]
+        expected = inspect.Signature(parameters)
+
+        self.assertEqual(inspect.signature(f), expected)
         self.assertEqual(f.__name__, 'f')
 
     def test_missing_kwarg(self):
@@ -264,11 +267,15 @@ class TestExperimental(TestStabilityState):
 
     def test_function_signature(self):
         f = self._get_f('0.1.0')
-        # Py2: update this to use inspect.signature when we drop Python 2
-        # inspect.getargspec is deprecated and won't exist in 3.6
-        expected = inspect.ArgSpec(
-            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
-        self.assertEqual(inspect.getargspec(f), expected)
+
+        parameters = [
+            inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD),
+            inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                              default=42)
+        ]
+        expected = inspect.Signature(parameters)
+
+        self.assertEqual(inspect.signature(f), expected)
         self.assertEqual(f.__name__, 'f')
 
     def test_missing_kwarg(self):
@@ -323,11 +330,15 @@ class TestDeprecated(TestStabilityState):
     def test_function_signature(self):
         f = self._get_f('0.1.0', until='0.1.4',
                         reason='You should now use skbio.g().')
-        # Py2: update this to use inspect.signature when we drop Python 2
-        # inspect.getargspec is deprecated and won't exist in 3.6
-        expected = inspect.ArgSpec(
-            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
-        self.assertEqual(inspect.getargspec(f), expected)
+
+        parameters = [
+            inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD),
+            inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                              default=42)
+        ]
+        expected = inspect.Signature(parameters)
+
+        self.assertEqual(inspect.signature(f), expected)
         self.assertEqual(f.__name__, 'f')
 
     def test_missing_kwarg(self):
diff --git a/skbio/util/tests/test_misc.py b/skbio/util/tests/test_misc.py
index cb03bbb..ae4b236 100644
--- a/skbio/util/tests/test_misc.py
+++ b/skbio/util/tests/test_misc.py
@@ -6,11 +6,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-from future.builtins import range
-import six
-from six import BytesIO
-
+import io
 import unittest
 from tempfile import NamedTemporaryFile, mkdtemp
 from os.path import exists, join
@@ -70,7 +66,7 @@ class TestMiniRegistry(unittest.TestCase):
         self.assertIn("name", new)
 
     def test_everything(self):
-        class SomethingToInterpolate(object):
+        class SomethingToInterpolate:
             def interpolate_me():
                 """First line
 
@@ -178,10 +174,10 @@ class ChunkStrTests(unittest.TestCase):
         self.assertEqual(chunk_str('abcdefg', 3, ' - '), 'abc - def - g')
 
     def test_invalid_n(self):
-        with six.assertRaisesRegex(self, ValueError, 'n=0'):
+        with self.assertRaisesRegex(ValueError, 'n=0'):
             chunk_str('abcdef', 0, ' ')
 
-        with six.assertRaisesRegex(self, ValueError, 'n=-42'):
+        with self.assertRaisesRegex(ValueError, 'n=-42'):
             chunk_str('abcdef', -42, ' ')
 
 
@@ -204,7 +200,7 @@ class MiscTests(unittest.TestCase):
     def test_safe_md5(self):
         exp = 'ab07acbb1e496801937adfa772424bf7'
 
-        fd = BytesIO(b'foo bar baz')
+        fd = io.BytesIO(b'foo bar baz')
         obs = safe_md5(fd)
         self.assertEqual(obs.hexdigest(), exp)
 
@@ -279,15 +275,14 @@ class CardinalToOrdinalTests(unittest.TestCase):
         self.assertEqual(obs, exp)
 
     def test_invalid_n(self):
-        with six.assertRaisesRegex(self, ValueError, '-1'):
+        with self.assertRaisesRegex(ValueError, '-1'):
             cardinal_to_ordinal(-1)
 
 
 class TestFindDuplicates(unittest.TestCase):
     def test_empty_input(self):
         def empty_gen():
-            return
-            yield
+            yield from ()
 
         for empty in [], (), '', set(), {}, empty_gen():
             self.assertEqual(find_duplicates(empty), set())
@@ -309,8 +304,7 @@ class TestFindDuplicates(unittest.TestCase):
 
     def test_mixed_types(self):
         def gen():
-            for e in 'a', 1, 'bc', 2, 'a', 2, 2, 3.0:
-                yield e
+            yield from ('a', 1, 'bc', 2, 'a', 2, 2, 3.0)
 
         self.assertEqual(find_duplicates(gen()), set(['a', 2]))
 
diff --git a/skbio/util/tests/test_testing.py b/skbio/util/tests/test_testing.py
index 236bd54..8d48722 100644
--- a/skbio/util/tests/test_testing.py
+++ b/skbio/util/tests/test_testing.py
@@ -6,8 +6,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
 import os
 import itertools
 import unittest
diff --git a/skbio/workflow.py b/skbio/workflow.py
index a39affc..b331c1a 100644
--- a/skbio/workflow.py
+++ b/skbio/workflow.py
@@ -200,10 +200,6 @@ allow you to indicate ``anything`` as an option value, anything that is
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from __future__ import absolute_import, division, print_function
-
-from future.utils import viewitems
-
 import sys
 from copy import deepcopy
 from time import time
@@ -211,12 +207,10 @@ from functools import update_wrapper
 from collections import Iterable
 from types import MethodType
 
-import six
-
 from skbio.util._decorator import experimental
 
 
-class NotExecuted(object):
+class NotExecuted:
     """Helper object to track if a method was executed"""
     @experimental(as_of="0.4.0")
     def __init__(self):
@@ -229,7 +223,7 @@ class NotExecuted(object):
 _not_executed = NotExecuted()
 
 
-class Exists(object):
+class Exists:
     """Stub object to assist with ``requires`` when a value exists"""
     @experimental(as_of="0.4.0")
     def __contains__(self, item):
@@ -237,7 +231,7 @@ class Exists(object):
 anything = Exists()  # external, for when a value can be anything
 
 
-class NotNone(object):
+class NotNone:
     @experimental(as_of="0.4.0")
     def __contains__(self, item):
         if item is None:
@@ -247,7 +241,7 @@ class NotNone(object):
 not_none = NotNone()
 
 
-class Workflow(object):
+class Workflow:
     """Arbitrary workflow support structure
 
     Methods that are considered to be directly part of the workflow must
@@ -295,7 +289,7 @@ class Workflow(object):
         self.state = state
         self.iter_ = None
 
-        for k, v in viewitems(kwargs):
+        for k, v in kwargs.items():
             if hasattr(self, k):
                 raise AttributeError("'%s' already exists in self." % k)
             setattr(self, k, v)
@@ -449,7 +443,7 @@ class Workflow(object):
         return update_wrapper(wrapped, func)
 
 
-class method(object):
+class method:
     """Decorate a function to indicate it is a workflow method
 
     Parameters
@@ -471,7 +465,7 @@ class method(object):
         return func
 
 
-class requires(object):
+class requires:
     """Decorator that executes a function if requirements are met
 
     Parameters
@@ -507,7 +501,7 @@ class requires(object):
         elif isinstance(values, set):
             self.values = values
         else:
-            if isinstance(values, six.string_types):
+            if isinstance(values, str):
                 self.values = values
             elif isinstance(values, Iterable):
                 self.values = set(values)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-skbio.git



More information about the debian-med-commit mailing list