[med-svn] [python-cobra] 01/02: New upstream version 0.8.2

Afif Elghraoui afif at moszumanska.debian.org
Wed Oct 18 02:11:25 UTC 2017


This is an automated email from the git hooks/post-receive script.

afif pushed a commit to branch master
in repository python-cobra.

commit 6db8bde56ca9bdb588abcbd83df048b8e66f9bd2
Author: Afif Elghraoui <afif at debian.org>
Date:   Fri Aug 11 17:00:14 2017 -0400

    New upstream version 0.8.2
---
 .coveragerc                                        |   10 +-
 CONTRIBUTING.rst => .github/CONTRIBUTING.rst       |  107 +-
 .github/ISSUE_TEMPLATE.md                          |   28 +
 .gitignore                                         |    4 +-
 .travis.yml                                        |   55 +-
 INSTALL.rst                                        |   34 +-
 MANIFEST.in                                        |    2 +-
 README.rst                                         |   20 +-
 appveyor.yml                                       |   79 +-
 appveyor/build_glpk.py                             |    1 +
 ...cobra-cameo-merge-benchmarks-before-merge.ipynb |  362 ++++
 benchmarks/cobra-cameo-merge-benchmarks.ipynb      |  548 +++++
 cobra/VERSION                                      |    1 -
 cobra/__init__.py                                  |   23 +-
 cobra/config.py                                    |   10 +
 cobra/core/Metabolite.py                           |  285 ---
 cobra/core/Model.py                                |  384 ----
 cobra/core/Object.py                               |   26 -
 cobra/core/Reaction.py                             |  708 -------
 cobra/core/Solution.py                             |   39 -
 cobra/core/__init__.py                             |   29 +-
 .../{ArrayBasedModel.py => arraybasedmodel.py}     |   20 +-
 cobra/core/{DictList.py => dictlist.py}            |  175 +-
 cobra/core/{Formula.py => formula.py}              |   31 +-
 cobra/core/{Gene.py => gene.py}                    |  141 +-
 cobra/core/metabolite.py                           |  264 +++
 cobra/core/model.py                                | 1070 ++++++++++
 cobra/core/object.py                               |   54 +
 cobra/core/reaction.py                             | 1133 ++++++++++
 cobra/core/solution.py                             |  322 +++
 cobra/core/{Species.py => species.py}              |   23 +-
 cobra/design/__init__.py                           |    6 +-
 cobra/design/design_algorithms.py                  |  432 +---
 cobra/exceptions.py                                |   58 +
 cobra/flux_analysis/__init__.py                    |   37 +-
 cobra/flux_analysis/deletion_worker.py             |    9 +-
 cobra/flux_analysis/double_deletion.py             |   30 +-
 cobra/flux_analysis/essentiality.py                |   70 -
 cobra/flux_analysis/gapfilling.py                  |  468 +++--
 cobra/flux_analysis/loopless.py                    |  252 ++-
 cobra/flux_analysis/moma.py                        |  123 +-
 cobra/flux_analysis/parsimonious.py                |  334 ++-
 cobra/flux_analysis/phenotype_phase_plane.py       |  262 ++-
 cobra/flux_analysis/reaction.py                    |  343 +--
 cobra/flux_analysis/sampling.py                    |  826 ++++++++
 cobra/flux_analysis/single_deletion.py             |  442 ++--
 cobra/flux_analysis/summary.py                     |  142 +-
 cobra/flux_analysis/variability.py                 |  336 ++-
 cobra/io/__init__.py                               |   40 +-
 cobra/io/dict.py                                   |  225 ++
 cobra/io/json.py                                   |  301 +--
 cobra/io/mat.py                                    |   94 +-
 cobra/io/sbml.py                                   |  541 ++---
 cobra/io/sbml3.py                                  |  150 +-
 cobra/io/yaml.py                                   |  114 +
 cobra/manipulation/__init__.py                     |   22 +-
 cobra/manipulation/annotate.py                     |    4 +-
 cobra/manipulation/delete.py                       |   12 +-
 cobra/manipulation/modify.py                       |  153 +-
 cobra/manipulation/validate.py                     |    9 +-
 cobra/oven/README                                  |   30 -
 cobra/oven/WARNING                                 |    1 -
 cobra/oven/__init__.py                             |    2 -
 cobra/oven/aliebrahim/__init__.py                  |    0
 cobra/oven/aliebrahim/designAnalysis.py            |  143 --
 cobra/oven/aliebrahim/gapAnalysis_MILP_figure.svg  |  103 -
 cobra/oven/aliebrahim/keggIO.py                    |  163 --
 cobra/oven/aliebrahim/simphenyIO.py                |  184 --
 cobra/oven/danielhyduke/__init__.py                |    0
 cobra/oven/danielhyduke/construction/balance.py    |  329 ---
 .../oven/danielhyduke/construction/omics_guided.py |   18 -
 cobra/oven/danielhyduke/general/__init__.py        |    1 -
 cobra/oven/danielhyduke/general/arrays.py          |   32 -
 cobra/oven/danielhyduke/jython/README              |    1 -
 cobra/oven/danielhyduke/jython/__init__.py         |    0
 cobra/oven/danielhyduke/jython/numpy/README        |    1 -
 cobra/oven/danielhyduke/jython/numpy/__init__.py   |    5 -
 .../oven/danielhyduke/jython/numpy/core/Matrix.py  |  211 --
 .../danielhyduke/jython/numpy/core/__init__.py     |    3 -
 cobra/oven/danielhyduke/jython/numpy/core/core.py  |  582 -----
 .../danielhyduke/jython/numpy/core/multiarray.py   |  312 ---
 .../oven/danielhyduke/jython/numpy/core/numeric.py | 2217 --------------------
 .../danielhyduke/jython/numpy/core/numerictypes.py |  734 -------
 cobra/oven/danielhyduke/jython/scipy/README        |    1 -
 cobra/oven/danielhyduke/jython/scipy/__init__.py   |    0
 .../danielhyduke/jython/scipy/sparse/__init__.py   |   22 -
 .../oven/danielhyduke/jython/scipy/sparse/base.py  |  615 ------
 .../danielhyduke/jython/scipy/sparse/compressed.py |  708 -------
 .../danielhyduke/jython/scipy/sparse/construct.py  |  535 -----
 cobra/oven/danielhyduke/jython/scipy/sparse/csr.py |  652 ------
 cobra/oven/danielhyduke/jython/scipy/sparse/lil.py |  454 ----
 .../danielhyduke/jython/scipy/sparse/sputils.py    |  133 --
 cobra/oven/danielhyduke/query/__init__.py          |    1 -
 cobra/oven/danielhyduke/query/query.py             |   63 -
 cobra/solvers/__init__.py                          |   16 +-
 cobra/solvers/cglpk.pyx                            |   19 +-
 cobra/solvers/coin.py                              |   12 +-
 cobra/solvers/cplex_solver.py                      |   15 +-
 cobra/solvers/cplex_solver_java.py                 |   27 +-
 cobra/solvers/esolver.py                           |   18 +-
 cobra/solvers/glpk_solver.py                       |   23 +-
 cobra/solvers/glpk_solver_java.py                  |   29 +-
 cobra/solvers/gurobi_solver.py                     |   20 +-
 cobra/solvers/gurobi_solver_java.py                |   27 +-
 cobra/solvers/mosek.py                             |   10 +-
 cobra/solvers/parameters.py                        |    4 +
 cobra/solvers/wrappers.py                          |    4 +
 cobra/test/__init__.py                             |   18 +-
 cobra/test/conftest.py                             |   98 +-
 cobra/test/data/iJO1366.pickle                     |  Bin 1354269 -> 1810497 bytes
 cobra/test/data/mini.json                          |    3 +-
 cobra/test/data/mini.mat                           |  Bin 14600 -> 17688 bytes
 cobra/test/data/mini.pickle                        |  Bin 25128 -> 32825 bytes
 cobra/test/data/mini.yml                           | 1148 ++++++++++
 cobra/test/data/mini_cobra.xml                     |  104 +-
 cobra/test/data/mini_fbc1.xml                      |   35 +-
 cobra/test/data/mini_fbc2.xml                      |    1 +
 cobra/test/data/mini_fbc2.xml.bz2                  |  Bin 5008 -> 5016 bytes
 cobra/test/data/mini_fbc2.xml.gz                   |  Bin 5719 -> 5729 bytes
 cobra/test/data/raven.pickle                       |  Bin 11233 -> 13758 bytes
 cobra/test/data/salmonella.pickle                  |  Bin 1475508 -> 2151196 bytes
 cobra/test/data/textbook_fva.json                  |    2 +-
 cobra/test/data/textbook_pfba_fva.json             |    1 +
 cobra/test/data/textbook_solution.pickle           |  Bin 5278 -> 143584 bytes
 cobra/test/data/update_pickles.py                  |   38 +-
 cobra/test/test_design.py                          |   60 -
 cobra/test/test_flux_analysis.py                   |  910 +++++---
 cobra/test/test_io.py                              |   54 +-
 cobra/test/test_manipulation.py                    |   51 +-
 cobra/test/test_model.py                           |  542 ++++-
 cobra/test/test_solver_model.py                    |  800 +++++++
 cobra/test/test_solver_utils.py                    |  133 ++
 cobra/test/test_solvers.py                         |   17 +-
 cobra/test/{test_dictlist.py => test_util.py}      |  111 +-
 cobra/topology/__init__.py                         |   12 +-
 cobra/topology/reporter_metabolites.py             |  137 +-
 cobra/util/__init__.py                             |    8 +
 cobra/util/array.py                                |  198 ++
 cobra/util/context.py                              |   71 +
 cobra/util/solver.py                               |  444 ++++
 cobra/util/util.py                                 |   15 +
 cobra/util/version_info.py                         |   79 +
 cobra/version.py                                   |  133 --
 config.sh                                          |   45 +-
 develop-requirements.txt                           |    5 +-
 documentation_builder/autodoc.sh                   |    6 +-
 documentation_builder/building_model.ipynb         |  238 ++-
 documentation_builder/cobra.core.rst               |   40 +-
 documentation_builder/cobra.flux_analysis.rst      |   16 +-
 documentation_builder/cobra.rst                    |   21 +
 documentation_builder/cobra.util.rst               |   46 +
 documentation_builder/conf.py                      |   52 +-
 documentation_builder/constraints_objectives.ipynb |  484 +++++
 documentation_builder/deletions.ipynb              |  361 ++--
 documentation_builder/faq.ipynb                    |  149 +-
 documentation_builder/gapfilling.ipynb             |  174 +-
 documentation_builder/getting_started.ipynb        |  542 +++--
 documentation_builder/index.rst                    |    4 +-
 documentation_builder/io.ipynb                     |  290 ++-
 documentation_builder/loopless.ipynb               |  399 ++--
 documentation_builder/milp.ipynb                   |  423 ----
 documentation_builder/phenotype_phase_plane.ipynb  |  309 ++-
 documentation_builder/plot_helper.py               |    8 +-
 documentation_builder/pymatbridge.ipynb            |    2 +-
 documentation_builder/qp.ipynb                     |  287 ---
 documentation_builder/sampling.ipynb               |  576 +++++
 documentation_builder/simulating.ipynb             |  811 +++----
 documentation_builder/solvers.ipynb                |  676 +-----
 ez_setup.py                                        |  426 ----
 manylinux_builder/Dockerfile                       |    2 +-
 manylinux_builder/build_cobrapy.sh                 |    1 -
 release-notes/0.6.0.md                             |  206 ++
 release-notes/0.6.1.md                             |    9 +
 release-notes/0.6.2.md                             |   29 +
 release-notes/0.7.0.md                             |   23 +
 release-notes/0.8.0.md                             |   27 +
 release-notes/0.8.1.md                             |   14 +
 release-notes/0.8.2.md                             |   17 +
 release-notes/next-release.md                      |    7 +
 scripts/compare-benchmark.py                       |   35 +
 scripts/deploy.sh                                  |   12 +-
 setup.cfg                                          |   54 +
 setup.py                                           |   65 +-
 tox.ini                                            |   24 +-
 184 files changed, 16764 insertions(+), 15677 deletions(-)

diff --git a/.coveragerc b/.coveragerc
index 05ff111..901715c 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -4,15 +4,7 @@ branch = True
 source = cobra
 omit =
     cobra/solvers/*
-    cobra/test/data/*
-    cobra/test_all.py
-    cobra/version.py
-    cobra/oven/*
-    # deprecated code
-    cobra/core/Formula.py
-    # can not be run with free solver
-    cobra/flux_analysis/moma.py
-
+    cobra/test/*
 
 [report]
 # Regexes for lines to exclude from consideration
diff --git a/CONTRIBUTING.rst b/.github/CONTRIBUTING.rst
similarity index 68%
rename from CONTRIBUTING.rst
rename to .github/CONTRIBUTING.rst
index fc3e6b7..e56792f 100644
--- a/CONTRIBUTING.rst
+++ b/.github/CONTRIBUTING.rst
@@ -2,7 +2,8 @@
 Contributing
 ============
 
-Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given.
+Contributions are welcome, and they are greatly appreciated! Every little bit
+helps, and credit will always be given.
 
 You can contribute in many ways:
 
@@ -12,34 +13,42 @@ Types of Contributions
 Report Bugs
 ~~~~~~~~~~~
 
-Report bugs using the `issue tracker <https://github.com/opencobra/cobrapy/issues>`__  
+Report bugs using the `issue tracker <https://github.com/opencobra/cobrapy/issues>`__
 
 If you are reporting a bug, please include:
 
 * Your operating system name and version.
+* Your Python and cobrapy version.
 * Any details about your local setup that might be helpful in troubleshooting.
 * Detailed steps to reproduce the bug.
 
 Fix Bugs
 ~~~~~~~~
 
-Look through the GitHub `issues <https://github.com/opencobra/cobrapy/issues>`__ for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to
-implement it.
+Look through the GitHub `issues <https://github.com/opencobra/cobrapy/issues>`__
+for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants
+to implement it.
 
 Implement Features
 ~~~~~~~~~~~~~~~~~~
 
-Look through the GitHub `issues <https://github.com/opencobra/cobrapy/issues>`__ and `projects <https://github.com/opencobra/cobrapy/projects>`__ for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it.
+Look through the GitHub `issues <https://github.com/opencobra/cobrapy/issues>`__
+and `projects <https://github.com/opencobra/cobrapy/projects>`__ for features.
+Anything tagged with "enhancement" and "help wanted" is open to whoever wants to
+implement it.
 
 Write Documentation
 ~~~~~~~~~~~~~~~~~~~
 
-cobrapy could always use more documentation, whether as part of the official cobrapy docs, in docstrings, or even on the web in blog posts, articles, and such - all contributions are welcome!
+cobrapy could always use more documentation, whether as part of the official
+cobrapy docs, in docstrings, or even on the web in blog posts, articles, and
+such - all contributions are welcome!
 
 Submit Feedback
 ~~~~~~~~~~~~~~~
 
-The best way to send feedback is to file an `issue <https://github.com/opencobra/cobrapy/issues>`__.
+The best way to send feedback is to file an
+`issue <https://github.com/opencobra/cobrapy/issues>`__.
 
 If you are proposing a feature:
 
@@ -48,21 +57,25 @@ If you are proposing a feature:
 * Remember that this is a volunteer-driven project, and that contributions
   are welcome :)
 
-If you like cobrapy please remember to 'star' our github page (click on the star at top right corner), that way we also have an idea of who is using cobrapy!
+If you like cobrapy please remember to 'star' our github page (click on the star
+at top right corner), that way we also have an idea of who is using cobrapy!
 
 Get Started!
 ------------
 
-Want to contribute a new feature or improvement? Consider starting by raising an issue and assign it to yourself to
-describe what you want to achieve. This way, we reduce the risk of duplicated efforts and you may also get
-suggestions on how to best proceed, e.g. there may be half-finished work in some branch that you could start with.
+Want to contribute a new feature or improvement? Consider starting by raising an
+issue and assign it to yourself to describe what you want to achieve. This way,
+we reduce the risk of duplicated efforts and you may also get suggestions on how
+to best proceed, e.g. there may be half-finished work in some branch that you
+could start with.
 
-Here's how to set up `cobrapy` for local development to contribute smaller features or changes that you can implement yourself.
+Here's how to set up `cobrapy` for local development to contribute smaller
+features or changes that you can implement yourself.
 
 1. Fork the `cobrapy` repository on GitHub.
 2. Clone your fork locally::
 
-    $ git clone git at github.com:your_name_here/cobrapy.git
+    $ git clone git at github.com:<your Github name>/cobrapy.git
 
 3. Install libglpk using your package manager. For macOS::
 
@@ -72,7 +85,8 @@ Here's how to set up `cobrapy` for local development to contribute smaller featu
 
 	$ sudo apt-get install libglpk-dev
 
-4. If virtualenvwrapper is not installed, `follow the directions <https://virtualenvwrapper.readthedocs.io/en/latest/>`__
+4. If virtualenvwrapper is not installed,
+   `follow the directions <https://virtualenvwrapper.readthedocs.io/en/latest/>`__
    to install virtualenvwrapper.
 
 5. Install your local copy of cobrapy into a virtualenv with virtualenvwrapper::
@@ -80,40 +94,63 @@ Here's how to set up `cobrapy` for local development to contribute smaller featu
     $ cd cobrapy
     $ mkvirtualenv cobrapy
 
-   Use the ``--python`` option to select a specific version of Python for the virtualenv. Note on macOS, matplotlib
-   requires Python be installed as a framework but virtualenv creates a non-framework build of Python.
-   See the `matplotlib FAQ <http://matplotlib.org/1.5.3/faq/virtualenv_faq.html>`__ for details
-   on a workaround.
+   Use the ``--python`` option to select a specific version of Python for the
+   virtualenv. Note on macOS, matplotlib requires Python be installed as a
+   framework but virtualenv creates a non-framework build of Python.  See the
+   `matplotlib FAQ <http://matplotlib.org/1.5.3/faq/virtualenv_faq.html>`__ for
+   details on a workaround.
 
 6. Install the required packages for development in the virtualenv using pip install::
 
-   (cobrapy)$ pip install --upgrade pip
-   (cobrapy)$ pip install -r develop-requirements.txt
+    (cobrapy)$ pip install --upgrade pip setuptools wheel
+    (cobrapy)$ pip install -r develop-requirements.txt
 
-7. Setup cobrapy for development::
+   If you want to work on the Matlab interface, please also install
+   ``pymatbridge``::
 
-    (cobrapy)$ python setup.py develop
+    (cobrapy)$ pip install pymatbridge
 
-8. Create a branch for local development (see below for details on the branching model)::
+7. Check out the branch that you want to contribute to. Most likely that will be
+   ``devel``::
+
+    (cobrapy)$ git checkout devel
+
+8. Create a branch for local development based on the previously checked out
+   branch (see below for details on the branching model)::
 
     (cobrapy)$ git checkout -b name-of-your-bugfix-or-feature
 
    Now you can make your changes locally.
 
-9. When you are done making changes, check that your changes pass pep8
-   and the tests with tox for the supported Python versions::
+9. Setup cobrapy for development::
+
+    (cobrapy)$ python setup.py develop
+
+   or::
+
+    (cobrapy)$ pip install -e .
+
+10. When you are done making changes, check that your changes pass pep8
+    and the tests with tox for your local Python version::
+
+     (cobrapy)$ tox -e pep8
+
+    and likely one of::
 
-    (cobrapy)$ tox -e py27
-    (cobrapy)$ tox -e py34
-    (cobrapy)$ tox -e py35
+     (cobrapy)$ tox -e py27
+     (cobrapy)$ tox -e py34
+     (cobrapy)$ tox -e py35
 
-10. Commit your changes and push your branch to GitHub::
+11. Commit your changes and push your branch to GitHub::
 
     (cobrapy)$ git add .
     (cobrapy)$ git commit -m "Your detailed description of your changes."
     (cobrapy)$ git push origin name-of-your-bugfix-or-feature
 
-11. Submit a pull request through the GitHub website.
+12. Submit a pull request through the GitHub website. Once you submit a pull
+    request your changes will be tested automatically against multiple Python
+    versions and operating systems. Further errors might appear during those
+    tests.
 
 For larger features that you want to work on collaboratively with other cobrapy team members, you may consider to first request to join the cobrapy developers team to get write access to the repository so that you can create a branch in the main repository (or simply ask the maintainer to create a branch for you). Once you have a new branch you can push your changes directly to the main repository and when finished, submit a pull request from that branch to ``devel``.
 
@@ -185,12 +222,14 @@ Branching model
 ``devel``
     Is the branch all pull-requests should be based on.
 ``master``
-    Is only touched by maintainers and is the branch with only tested, reviewed code that is released or ready for the
-    next release.
+    Is only touched by maintainers and is the branch with only tested, reviewed
+    code that is released or ready for the next release.
 ``{fix, bugfix, doc, feature}/descriptive-name``
-    Is the recommended naming scheme for smaller improvements, bugfixes, documentation improvement and new features respectively.
+    Is the recommended naming scheme for smaller improvements, bugfixes,
+    documentation improvement and new features respectively.
 
-Please use concise descriptive commit messages and consider using ``git pull --rebase`` when you update your own fork to avoid merge commits.
+Please use concise descriptive commit messages and consider using
+``git pull --rebase`` when you update your own fork to avoid merge commits.
 
 1. Tests are in the ``cobra/test`` directory. They are automatically run
    through continuous integration services on both python 2 and python 3
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..096211c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,28 @@
+#### Problem description
+
+Please explain:
+* **what** you tried to achieve,
+* **how** you went about it (referring to the code sample), and
+* **why** the current behaviour is a problem and what output
+  you expected instead.
+
+#### Code Sample
+
+Create a [minimal, complete, verifiable example
+](https://stackoverflow.com/help/mcve).
+
+```python
+# Paste your code here.
+
+```
+
+#### Actual Output
+
+#### Expected Output
+
+#### Output of `cobra.show_versions()`
+
+<details>
+# Paste the output of `import cobra;cobra.show_versions()` here.
+
+</details>
diff --git a/.gitignore b/.gitignore
index 7f4e088..65f38b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -93,5 +93,5 @@ manylinux_builder/wheelhouse
 *~
 venv/
 .benchmarks/
-/glpk-4.60.tar.gz
-glpk-4.60
+glpk-4.*
+/.testmondata
diff --git a/.travis.yml b/.travis.yml
index 1a002b4..db8a64a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,6 +11,15 @@ addons:
     packages:
       - libfreetype6-dev
       - libpng12-dev
+git:
+  depth: 3
+
+branches:
+ only:
+ - master
+ - devel
+ - devel-2
+ - /^[0-9]+\.[0-9]+\.[0-9]+[.0-9ab]*$/
 
 env:
   global:
@@ -20,10 +29,15 @@ env:
     - UNICODE_WIDTH=32
 
 matrix:
+  fast_finish: true
   exclude:
-      - python: 3.5
+    - python: 3.5
   include:
     - os: linux
+      python: 3.5
+      env:
+        - TOXENV=pep8
+    - os: linux
       env:
         - MB_PYTHON_VERSION=2.7
     - os: linux
@@ -32,6 +46,9 @@ matrix:
     - os: linux
       env:
         - MB_PYTHON_VERSION=3.5
+    - os: linux
+      env:
+        - MB_PYTHON_VERSION=3.6
     - os: osx
       language: objective-c
       env:
@@ -44,25 +61,39 @@ matrix:
       language: objective-c
       env:
         - MB_PYTHON_VERSION=3.5
+    - os: osx
+      language: objective-c
+      env:
+        - MB_PYTHON_VERSION=3.6
 
 before_install:
-  - (git clone https://github.com/matthew-brett/multibuild.git && cd multibuild && git checkout ffe5995)
-  # matplotlib non-compatible as testing runs in venv (non-framework)
-  - TEST_DEPENDS="cython codecov coverage numpy scipy python-libsbml jsonschema six pytest pytest-cov pytest-benchmark pandas tabulate"
-  - BUILD_DEPENDS="cython numpy scipy"
-  - source multibuild/common_utils.sh
-  - source multibuild/travis_steps.sh
-  - before_install
+  - if [[ -n "${MB_PYTHON_VERSION}" ]]; then
+      (travis_retry git clone https://github.com/matthew-brett/multibuild.git && cd multibuild && git checkout edf5b691d0d565b4e65e655b983c11c883acbeca);
+      TEST_DEPENDS="swiglpk optlang sympy decorator cython codecov coverage numpy scipy jsonschema six pytest pytest-cov pytest-benchmark tabulate";
+      BUILD_DEPENDS="swiglpk optlang sympy cython numpy scipy";
+      source multibuild/common_utils.sh;
+      source multibuild/travis_steps.sh;
+      before_install;
+    fi
+  - pip install -U pip setuptools wheel tox
 
 before_cache:
   - set +e
 
 install:
-  - build_wheel . $PLAT
+  - if [[ -n "${MB_PYTHON_VERSION}" ]]; then
+      travis_retry build_wheel . $PLAT;
+    fi
 
 script:
-  - if [[ $TRAVIS_OS_NAME == "linux" ]]; then pip install pip --upgrade; pip install 'sphinx>=1.5' rstcheck pep8; pep8 cobra --exclude=oven,solvers,sbml.py --show-source; rstcheck *.rst; fi
-  - install_run $PLAT
+  - if [[ -n "${MB_PYTHON_VERSION}" ]]; then
+      travis_retry install_run $PLAT;
+    else
+      pip install rstcheck Cython;
+      find . -name "*.rst" -exec rstcheck {} +;
+      tox -e "${TOXENV}";
+    fi
+  - ls ${TRAVIS_BUILD_DIR}/wheelhouse/ || echo "no wheelhouse"
 
 deploy:
   provider: script
@@ -73,4 +104,4 @@ deploy:
     tags: true
 
 after_success:
-  - if [[ $TRAVIS_OS_NAME == "linux" ]]; then pip install pip --upgrade; pip install codecov; codecov; fi
+  - if [[ $TRAVIS_OS_NAME == "linux" ]]; then pip install codecov; codecov; fi
diff --git a/INSTALL.rst b/INSTALL.rst
index 69fd03b..778fd60 100644
--- a/INSTALL.rst
+++ b/INSTALL.rst
@@ -65,13 +65,11 @@ yum).
    -  Use ``sudo pip install python-libsbml`` on Mac/Linux
 
 2. `lxml <http://lxml.de/>`_ to speed up read/write of SBML level 3 files.
-3. `numpy <http://numpy.org>`_ >= 1.6.1 for double deletions
-
-   -  `Windows numpy installer <http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy>`_
-4. `scipy <http://scipy.org>`_ >= 0.11 for ArrayBasedModel and saving to \*.mat files.
 
+3. `scipy <http://scipy.org>`_ >= 0.11 for MOMA and saving to \*.mat files.
    -  `Windows scipy installer <http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy>`_
-5. `pytest <http://docs.pytest.org/en/latest/>`_ and `pytest-benchmark
+
+4. `pytest <http://docs.pytest.org/en/latest/>`_ and `pytest-benchmark
    <http://pytest-benchmark.readthedocs.io/en/latest/>`_ are required
    for testing
 
@@ -81,13 +79,12 @@ You can install all packages directly by
 
     pip install "cobra[all]"
 
-Other solvers
+Solvers
 ~~~~~~~~~~~~~
 
-cobrapy comes with bindings to the GNU Linear Programming Kit ([glpk]
-(http://www.gnu.org/software/glpk/)) using its own bindings called
-"cglpk" in cobrapy. In addition, cobrapy currently supports these linear
-programming solvers:
+cobrapy uses `optlang <http://optlang.readthedocs.io>`_ to interface
+the mathematical solvers used to optimize the created COBRA models,
+which at the time of writing
 
 -  ILOG/CPLEX (available with
    `Academic <https://www.ibm.com/developerworks/university/academicinitiative/>`_
@@ -95,19 +92,7 @@ programming solvers:
    `Commercial <http://www.ibm.com/software/integration/optimization/cplex-optimizer/>`_
    licenses).
 -  `gurobi <http://gurobi.com>`_
--  `QSopt\_ex
-   esolver <http://www.dii.uchile.cl/~daespino/ESolver_doc/main.html>`_
--  `MOSEK <http://www.mosek.com/>`_
--  `coin-or clp and cbc <http://coin-or.org/>`_ through
-   `cylp <https://github.com/coin-or/CyLP>`_.
-
-ILOG/CPLEX, MOSEK, and Gurobi are commercial software packages that
-currently provide free licenses for academics and support both linear
-and quadratic programming. GLPK and clp are open source linear
-programming solvers; however, they may not be as robust as the
-commercial solvers for mixed-integer and quadratic programming.
-QSopt\_ex esolver is also open source, and can solve linear programs
-using rational operations, giving exact solutions.
+-  `glpk <http://www.gnu.org/software/glpk/>`_
 
 Testing your installation
 =========================
@@ -133,4 +118,5 @@ Then start python and type the following into the Python shell
     test_all()
 
 You should see some skipped tests and expected failures, and the
-function should return ``True``.
+function should return ``0``. If you see a value other than ``0`` please file
+an `issue report <CONTRIBUTING.rst>`_.
diff --git a/MANIFEST.in b/MANIFEST.in
index d1e6a98..a68bad7 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,2 +1,2 @@
-include README.rst INSTALL.rst LICENSE ez_setup.py
+include README.rst INSTALL.rst LICENSE
 include cobra/solvers/cglpk.pyx cobra/solvers/glpk.pxd
diff --git a/README.rst b/README.rst
index ba80cb4..f6de8fa 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,7 @@
 cobrapy - constraint-based reconstruction and analysis in python
 ================================================================
 
-|Build Status| |Coverage Status| |Build status| |PyPI| |Gitter|
+|Build Status| |Coverage Status| |Build status| |PyPI| |Gitter| |Waffle|
 
 What is cobrapy?
 ~~~~~~~~~~~~~~~~
@@ -18,7 +18,7 @@ for:
 
 - creating and managing metabolic models
 - accessing popular solvers
-- analyzing models with methods such as FVA, FBA, pFBA, MOMA etc. 
+- analyzing models with methods such as FVA, FBA, pFBA, MOMA etc.
 - inspecting models and drawing conclusions on gene essentiality,
   testing consequences of knock-outs etc.
 
@@ -36,10 +36,12 @@ also be
 `downloaded <https://readthedocs.org/projects/cobrapy/downloads/>`_.
 
 Please use the `Google
-Group <http://groups.google.com/group/cobra-pie>`_ for help.
-Alternatively, you can use
-`gitter.im <https://gitter.im/opencobra/cobrapy>`_ for quick questions
-and discussions about cobrapy (faster response times).
+Group <http://groups.google.com/group/cobra-pie>`_ for help. By writing a well formulated question, with sufficient
+detail, you are much more likely to quickly receive a good answer! Please refer to these `StackOverflow
+guidelines <https://stackoverflow.com/help/how-to-ask>`_ on how to ask questions.
+Alternatively, you can use `gitter.im <https://gitter.im/opencobra/cobrapy>`_ for quick questions
+and discussions about cobrapy (faster response times). Please keep in mind that answers are provided on a volunteer
+basis.
 
 More information about opencobra is available at the
 `website <http://opencobra.github.io/>`_.
@@ -68,7 +70,7 @@ Contributing
 ~~~~~~~~~~~~
 
 Contributions are always welcome! Please read the `contributions
-guideline <CONTRIBUTING.rst>`_ to get started.
+guideline <.github/CONTRIBUTING.rst>`_ to get started.
 
 License
 -------
@@ -99,4 +101,6 @@ Public License for more details.
    :target: https://pypi.python.org/pypi/cobra
 .. |Gitter| image:: https://badges.gitter.im/opencobra/cobrapy.svg
    :target: https://gitter.im/opencobra/cobrapy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
-
+.. |Waffle| image:: https://badge.waffle.io/opencobra/cobrapy.png?label=ready&title=Ready
+   :target: https://waffle.io/opencobra/cobrapy
+   :alt: 'Stories in Ready'
diff --git a/appveyor.yml b/appveyor.yml
index 8fcedcb..2571dc9 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,3 +1,10 @@
+branches:
+ only:
+ - master
+ - devel
+ - devel-2
+ - /^[0-9]+\.[0-9]+\.[0-9]+[.0-9ab]*$/
+
 environment:
 
   global:
@@ -8,17 +15,36 @@ environment:
     PIP_CACHE_DIR: "pip_cache"
 
   matrix:
-    - PYTHON: "C:\\Python27"
+    - PYTHON: "C:\\Miniconda-x64"
       PYTHON_VERSION: "2.7.12"
-      PYTHON_ARCH: "32"
-
-    - PYTHON: "C:\\Python34"
-      PYTHON_VERSION: "3.4.5"
-      PYTHON_ARCH: "32"
+      PYTHON_ARCH: "64"
+      CONDA: true
 
-    - PYTHON: "C:\\Python35"
+    - PYTHON: "C:\\Miniconda35-x64"
       PYTHON_VERSION: "3.5.2"
-      PYTHON_ARCH: "32"
+      PYTHON_ARCH: "64"
+      CONDA: true
+
+    - PYTHON: "C:\\Miniconda35-x64"
+      PYTHON_VERSION: "3.6.0"
+      PYTHON_ARCH: "64"
+      CONDA: true
+
+    # - PYTHON: "C:\\Python27"
+    #   PYTHON_VERSION: "2.7.12"
+    #   PYTHON_ARCH: "32"
+
+    # - PYTHON: "C:\\Python34"
+    #   PYTHON_VERSION: "3.4.5"
+    #   PYTHON_ARCH: "32"
+
+    # - PYTHON: "C:\\Python35"
+    #   PYTHON_VERSION: "3.5.2"
+    #   PYTHON_ARCH: "32"
+
+    # - PYTHON: "C:\\Python36"
+    #   PYTHON_VERSION: "3.6.0"
+    #   PYTHON_ARCH: "32"
 
     - PYTHON: "C:\\Python27-x64"
       PYTHON_VERSION: "2.7.12"
@@ -32,6 +58,13 @@ environment:
       PYTHON_VERSION: "3.5.2"
       PYTHON_ARCH: "64"
 
+    - PYTHON: "C:\\Python36-x64"
+      PYTHON_VERSION: "3.6.0"
+      PYTHON_ARCH: "64"
+
+matrix:
+  fast_finish: true
+
 clone_depth: 25
 
 init:
@@ -43,31 +76,33 @@ cache:
 
 
 install:
-  - "powershell appveyor\\install.ps1"
-  - ps: Start-FileDownload 'https://bitbucket.org/gutworth/six/raw/default/six.py'
-  - "%PYTHON%/python -m pip install pip setuptools>=24.0 wheel --upgrade"
-  - "%WITH_COMPILER% %PYTHON%/python appveyor/build_glpk.py"
-  - "%PYTHON%/python -m pip install pip setuptools wheel --upgrade"
-  - "%PYTHON%/python -m pip install --upgrade pytest"
-  - "%PYTHON%/python -m pip install pytest-cov pytest-benchmark"
-  - "%PYTHON%/python -m pip install Cython jsonschema twine pypandoc==1.1.3"
+  - "set PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+  - ps: |
+      if ($env:CONDA -eq "true") {
+          conda config --set always_yes yes --set changeps1 no;
+          conda install -q pip } else {
+          python -m pip install pip setuptools>=24.0 wheel --upgrade }
+  - if not defined CONDA %WITH_COMPILER% python -m pip install --upgrade pytest
+  - if defined CONDA conda install -q setuptools pytest numpy scipy
+  - python -m pip install pytest-cov pytest-benchmark pandas swiglpk optlang python-libsbml decorator Cython jsonschema twine pypandoc==1.1.3
+  - "%WITH_COMPILER% python appveyor/build_glpk.py"
 
 build: off
 
 test_script:
-  - "%WITH_COMPILER% %PYTHON%/python setup.py develop"
-  - "%WITH_COMPILER% %PYTHON%/python -m pytest --cov=cobra --benchmark-skip"
+  - "%WITH_COMPILER% python setup.py develop"
+  - "%WITH_COMPILER% python -m pytest --cov=cobra --benchmark-skip"
 
 after_test:
-  - "%WITH_COMPILER% %PYTHON%/python setup.py bdist_wheel bdist_wininst"
+  - if not defined CONDA %WITH_COMPILER% python setup.py bdist_wheel bdist_wininst
 
 artifacts:
   - path: dist\*
 
 deploy_script:
-  - ps: >-
-      if($env:appveyor_repo_tag -eq 'True') {
-          Invoke-Expression "$env:PYTHON/Scripts/twine upload dist/* --username $env:PYPI_USERNAME --password $env:PYPI_PASSWORD"
+  - ps: |
+      if($env:appveyor_repo_tag -eq "True" -And $env:CONDA -ne "true") {
+          Invoke-Expression "twine upload dist/* --username $env:PYPI_USERNAME --password $env:PYPI_PASSWORD"
       }
 
 #on_success:
diff --git a/appveyor/build_glpk.py b/appveyor/build_glpk.py
index ab05865..69a47f3 100644
--- a/appveyor/build_glpk.py
+++ b/appveyor/build_glpk.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import os
 import sys
 import hashlib
diff --git a/benchmarks/cobra-cameo-merge-benchmarks-before-merge.ipynb b/benchmarks/cobra-cameo-merge-benchmarks-before-merge.ipynb
new file mode 100644
index 0000000..bab9c1d
--- /dev/null
+++ b/benchmarks/cobra-cameo-merge-benchmarks-before-merge.ipynb
@@ -0,0 +1,362 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import cobra\n",
+    "from cobra.io import read_sbml_model\n",
+    "from cobra.test import create_test_model\n",
+    "from cobra import Model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['/Users/niso/anaconda/envs/cameo3.4/lib/python3.4/site-packages/cobra']"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.__path__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'0.4.1'"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.__version__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results = {}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "m = create_test_model('ecoli')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "10 loops, best of 3: 67.9 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 10 loops, best of 3: 67.9 ms per loop>"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "m2 = Model()\n",
+    "for r in m.reactions:\n",
+    "    m2.add_reaction(r)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['add_reaction'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "10 loops, best of 3: 28.2 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 10 loops, best of 3: 28.2 ms per loop>"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "m2 = Model()\n",
+    "m2.add_reactions(m.reactions)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['add_reactions'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 913 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 913 ms per loop>"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")\n",
+    "for reaction in model.reactions:\n",
+    "    model.remove_reactions([reaction])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['remove_reaction'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 589 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 589 ms per loop>"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['read_sbml_model'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "%%timeit -o\n",
+    "for reaction in model.reactions:\n",
+    "    model.objective = reaction\n",
+    "    solution = model.optimize()\n",
+    "    solution.f"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['iterate_all_reactions_set_objective_solve'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "10 loops, best of 3: 60.3 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 10 loops, best of 3: 60.3 ms per loop>"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model.copy()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['model_copy'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [conda env:cameo3.4]",
+   "language": "python",
+   "name": "conda-env-cameo3.4-py"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.4.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/benchmarks/cobra-cameo-merge-benchmarks.ipynb b/benchmarks/cobra-cameo-merge-benchmarks.ipynb
new file mode 100644
index 0000000..0742c6f
--- /dev/null
+++ b/benchmarks/cobra-cameo-merge-benchmarks.ipynb
@@ -0,0 +1,548 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/html": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "application/javascript": [
+       "\n",
+       "(function(global) {\n",
+       "  function now() {\n",
+       "    return new Date();\n",
+       "  }\n",
+       "\n",
+       "  var force = \"1\";\n",
+       "\n",
+       "  if (typeof (window._bokeh_onload_callbacks) === \"undefined\" || force !== \"\") {\n",
+       "    window._bokeh_onload_callbacks = [];\n",
+       "    window._bokeh_is_loading = undefined;\n",
+       "  }\n",
+       "\n",
+       "\n",
+       "  \n",
+       "  if (typeof (window._bokeh_timeout) === \"undefined\" || force !== \"\") {\n",
+       "    window._bokeh_timeout = Date.now() + 5000;\n",
+       "    window._bokeh_failed_load = false;\n",
+       "  }\n",
+       "\n",
+       "  var NB_LOAD_WARNING = {'data': {'text/html':\n",
+       "     \"<div style='background-color: #fdd'>\\n\"+\n",
+       "     \"<p>\\n\"+\n",
+       "     \"BokehJS does not appear to have successfully loaded. If loading BokehJS from CDN, this \\n\"+\n",
+       "     \"may be due to a slow or bad network connection. Possible fixes:\\n\"+\n",
+       "     \"</p>\\n\"+\n",
+       "     \"<ul>\\n\"+\n",
+       "     \"<li>re-rerun `output_notebook()` to attempt to load from CDN again, or</li>\\n\"+\n",
+       "     \"<li>use INLINE resources instead, as so:</li>\\n\"+\n",
+       "     \"</ul>\\n\"+\n",
+       "     \"<code>\\n\"+\n",
+       "     \"from bokeh.resources import INLINE\\n\"+\n",
+       "     \"output_notebook(resources=INLINE)\\n\"+\n",
+       "     \"</code>\\n\"+\n",
+       "     \"</div>\"}};\n",
+       "\n",
+       "  function display_loaded() {\n",
+       "    if (window.Bokeh !== undefined) {\n",
+       "      Bokeh.$(\"#\").text(\"BokehJS successfully loaded.\");\n",
+       "    } else if (Date.now() < window._bokeh_timeout) {\n",
+       "      setTimeout(display_loaded, 100)\n",
+       "    }\n",
+       "  }\n",
+       "\n",
+       "  function run_callbacks() {\n",
+       "    window._bokeh_onload_callbacks.forEach(function(callback) { callback() });\n",
+       "    delete window._bokeh_onload_callbacks\n",
+       "    console.info(\"Bokeh: all callbacks have finished\");\n",
+       "  }\n",
+       "\n",
+       "  function load_libs(js_urls, callback) {\n",
+       "    window._bokeh_onload_callbacks.push(callback);\n",
+       "    if (window._bokeh_is_loading > 0) {\n",
+       "      console.log(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n",
+       "      return null;\n",
+       "    }\n",
+       "    if (js_urls == null || js_urls.length === 0) {\n",
+       "      run_callbacks();\n",
+       "      return null;\n",
+       "    }\n",
+       "    console.log(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n",
+       "    window._bokeh_is_loading = js_urls.length;\n",
+       "    for (var i = 0; i < js_urls.length; i++) {\n",
+       "      var url = js_urls[i];\n",
+       "      var s = document.createElement('script');\n",
+       "      s.src = url;\n",
+       "      s.async = false;\n",
+       "      s.onreadystatechange = s.onload = function() {\n",
+       "        window._bokeh_is_loading--;\n",
+       "        if (window._bokeh_is_loading === 0) {\n",
+       "          console.log(\"Bokeh: all BokehJS libraries loaded\");\n",
+       "          run_callbacks()\n",
+       "        }\n",
+       "      };\n",
+       "      s.onerror = function() {\n",
+       "        console.warn(\"failed to load library \" + url);\n",
+       "      };\n",
+       "      console.log(\"Bokeh: injecting script tag for BokehJS library: \", url);\n",
+       "      document.getElementsByTagName(\"head\")[0].appendChild(s);\n",
+       "    }\n",
+       "  };\n",
+       "\n",
+       "  var js_urls = ['https://cdn.pydata.org/bokeh/release/bokeh-0.12.2.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.2.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-compiler-0.12.2.min.js'];\n",
+       "\n",
+       "  var inline_js = [\n",
+       "    function(Bokeh) {\n",
+       "      Bokeh.set_log_level(\"info\");\n",
+       "    },\n",
+       "    \n",
+       "    function(Bokeh) {\n",
+       "      \n",
+       "      Bokeh.$(\"#5a10f400-90ee-41ca-8c98-41a150f2b7a8\").text(\"BokehJS is loading...\");\n",
+       "    },\n",
+       "    function(Bokeh) {\n",
+       "      console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-0.12.2.min.css\");\n",
+       "      Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-0.12.2.min.css\");\n",
+       "      console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.2.min.css\");\n",
+       "      Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.2.min.css\");\n",
+       "    }\n",
+       "  ];\n",
+       "\n",
+       "  function run_inline_js() {\n",
+       "    \n",
+       "    if ((window.Bokeh !== undefined) || (force === \"1\")) {\n",
+       "      for (var i = 0; i < inline_js.length; i++) {\n",
+       "        inline_js[i](window.Bokeh);\n",
+       "      }} else if (Date.now() < window._bokeh_timeout) {\n",
+       "      setTimeout(run_inline_js, 100);\n",
+       "    } else if (!window._bokeh_failed_load) {\n",
+       "      console.log(\"Bokeh: BokehJS failed to load within specified timeout.\");\n",
+       "      window._bokeh_failed_load = true;\n",
+       "    } else if (!force) {\n",
+       "      var cell = $(\"#\").parents('.cell').data().cell;\n",
+       "      cell.output_area.append_execute_result(NB_LOAD_WARNING)\n",
+       "    }\n",
+       "\n",
+       "  }\n",
+       "\n",
+       "  if (window._bokeh_is_loading === 0) {\n",
+       "    console.log(\"Bokeh: BokehJS loaded, going straight to plotting\");\n",
+       "    run_inline_js();\n",
+       "  } else {\n",
+       "    load_libs(js_urls, function() {\n",
+       "      console.log(\"Bokeh: BokehJS plotting callback run at\", now());\n",
+       "      run_inline_js();\n",
+       "    });\n",
+       "  }\n",
+       "}(this));"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "import cobra\n",
+    "from cobra.io import read_sbml_model\n",
+    "from cobra.test import create_test_model\n",
+    "from cobra import Model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['/Users/niso/Dev/cobrapy-fork/cobra']"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.__path__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'0.4.2b2.post32+gitf12ab3a'"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.__version__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results = {}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "m = create_test_model('ecoli')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "m.solver = 'glpk'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 2.58 s per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 2.58 s per loop>"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "m2 = Model()\n",
+    "for r in m.reactions:\n",
+    "    m2.add_reaction(r)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['add_reaction'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 1.79 s per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 1.79 s per loop>"
+      ]
+     },
+     "execution_count": 20,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "m2 = Model()\n",
+    "m2.add_reactions(m.reactions)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['add_reactions'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 7.67 s per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 7.67 s per loop>"
+      ]
+     },
+     "execution_count": 27,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")\n",
+    "for reaction in model.reactions:\n",
+    "    model.remove_reactions([reaction])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['remove_reaction'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 7.05 s per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 7.05 s per loop>"
+      ]
+     },
+     "execution_count": 29,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")\n",
+    "model.remove_reactions(model.reactions)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['remove_reactions'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%%timeit -o\n",
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "model = read_sbml_model(\"/Users/niso/Dev/cobrapy-fork/cobra/test/data/iJO1366.xml\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 13.7 s per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 13.7 s per loop>"
+      ]
+     },
+     "execution_count": 24,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "for reaction in model.reactions:\n",
+    "    model.objective = reaction\n",
+    "    solution = model.solve()\n",
+    "    solution.f"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['iterate_all_reactions_set_objective_solve'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1 loop, best of 3: 794 ms per loop\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "<TimeitResult : 1 loop, best of 3: 794 ms per loop>"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%timeit -o\n",
+    "model.copy()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "benchmark_results['model_copy'] = _"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [conda env:cobra-merge]",
+   "language": "python",
+   "name": "conda-env-cobra-merge-py"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.4.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/cobra/VERSION b/cobra/VERSION
deleted file mode 100644
index 416bfb0..0000000
--- a/cobra/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.5.9
diff --git a/cobra/__init__.py b/cobra/__init__.py
index 239c694..21666c1 100644
--- a/cobra/__init__.py
+++ b/cobra/__init__.py
@@ -1,20 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 # set the warning format to be on a single line
 import warnings as _warnings
-from os.path import abspath as _abspath, dirname as _dirname
 from os import name as _name
+from os.path import abspath as _abspath
+from os.path import dirname as _dirname
 
-from .version import get_version
-from .core import Object, Metabolite, Gene, Reaction, Model, \
-    DictList, Species
-from . import io, flux_analysis, design
-
-try:
-    from .core import ArrayBasedModel
-except ImportError:
-    None
+from cobra import design, flux_analysis, io
+from cobra.core import (
+    DictList, Gene, Metabolite, Model, Object, Reaction, Species)
+from cobra.util.version_info import show_versions
 
-__version__ = get_version()
-del get_version
+__version__ = "0.8.2"
 
 # set the warning format to be prettier and fit on one line
 _cobra_path = _dirname(_abspath(__file__))
diff --git a/cobra/config.py b/cobra/config.py
new file mode 100644
index 0000000..0a8b647
--- /dev/null
+++ b/cobra/config.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import logging
+
+log = logging.getLogger(__name__)
+
+non_zero_flux_threshold = 1e-6
+ndecimals = 6
diff --git a/cobra/core/Metabolite.py b/cobra/core/Metabolite.py
deleted file mode 100644
index e764e1d..0000000
--- a/cobra/core/Metabolite.py
+++ /dev/null
@@ -1,285 +0,0 @@
-from warnings import warn
-import re
-
-from six import iteritems
-
-from .Species import Species
-
-# Numbers are not required because of the |(?=[A-Z])? block. See the
-# discussion in https://github.com/opencobra/cobrapy/issues/128 for
-# more details.
-element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
-
-
-class Metabolite(Species):
-    """Metabolite is a class for holding information regarding
-    a metabolite in a cobra.Reaction object.
-
-    """
-
-    def __init__(self, id=None, formula=None, name="",
-                 charge=None, compartment=None):
-        """
-        id: str
-
-        formula: str
-            Chemical formula (i.e. H2O)
-
-        name: str
-            A human readable name.
-
-        compartment: str or None
-            Compartment of metabolite.
-        """
-        Species.__init__(self, id, name)
-        self.formula = formula
-        # because in a Model a metabolite may participate in multiple Reactions
-        self.compartment = compartment
-        self.charge = charge
-
-        self._constraint_sense = 'E'
-        self._bound = 0.
-
-    @property
-    def elements(self):
-        tmp_formula = self.formula
-        if tmp_formula is None:
-            return {}
-        # necessary for some old pickles which use the deprecated
-        # Formula class
-        tmp_formula = str(self.formula)
-        # commonly occuring characters in incorrectly constructed formulas
-        if "*" in tmp_formula:
-            warn("invalid character '*' found in formula '%s'" % self.formula)
-            tmp_formula = tmp_formula.replace("*", "")
-        if "(" in tmp_formula or ")" in tmp_formula:
-            warn("invalid formula (has parenthesis) in '%s'" % self.formula)
-            return None
-        composition = {}
-        parsed = element_re.findall(tmp_formula)
-        for (element, count) in parsed:
-            if count == '':
-                count = 1
-            else:
-                try:
-                    count = float(count)
-                    int_count = int(count)
-                    if count == int_count:
-                        count = int_count
-                    else:
-                        warn("%s is not an integer (in formula %s)" %
-                             (count, self.formula))
-                except ValueError:
-                    warn("failed to parse %s (in formula %s)" %
-                         (count, self.formula))
-                    return None
-            if element in composition:
-                composition[element] += count
-            else:
-                composition[element] = count
-        return composition
-
-    @elements.setter
-    def elements(self, elements_dict):
-        def stringify(element, number):
-            return element if number == 1 else element + str(number)
-
-        self.formula = ''.join(stringify(e, n) for e, n in
-                               sorted(iteritems(elements_dict)))
-
-    @property
-    def formula_weight(self):
-        """Calculate the formula weight"""
-        try:
-            return sum([count * elements_and_molecular_weights[element]
-                        for element, count in self.elements.items()])
-        except KeyError as e:
-            warn("The element %s does not appear in the peridic table" % e)
-
-    @property
-    def y(self):
-        """The shadow price for the metabolite in the most recent solution
-
-        Shadow prices are computed from the dual values of the bounds in
-        the solution.
-
-        """
-        try:
-            return self._model.solution.y_dict[self.id]
-        except Exception as e:
-            if self._model is None:
-                raise Exception("not part of a model")
-            if not hasattr(self._model, "solution") or \
-                    self._model.solution is None or \
-                    self._model.solution.status == "NA":
-                raise Exception("model has not been solved")
-            if self._model.solution.status != "optimal":
-                raise Exception("model solution was not optimal")
-            raise e  # Not sure what the exact problem was
-
-    def remove_from_model(self, method='subtractive', **kwargs):
-        """Removes the association from self.model
-
-        method: 'subtractive' or 'destructive'.
-            If 'subtractive' then the metabolite is removed from all
-            associated reactions.  If 'destructive' then all associated
-            reactions are removed from the Model.
-
-        """
-        # why is model being taken in as a parameter? This plays
-        # back to the question of allowing a Metabolite to be associated
-        # with multiple Models
-        if "model" in kwargs:
-            warn("model argument deprecated")
-
-        self._model.metabolites.remove(self)
-        self._model = None
-        if method.lower() == 'subtractive':
-            for the_reaction in list(self._reaction):
-                the_coefficient = the_reaction._metabolites[self]
-                the_reaction.subtract_metabolites({self: the_coefficient})
-        elif method.lower() == 'destructive':
-            for x in self._reaction:
-                x.remove_from_model()
-        else:
-            raise Exception(method + " is not 'subtractive' or 'destructive'")
-
-    def summary(self, **kwargs):
-        """Print a summary of the reactions which produce and consume this
-        metabolite. This method requires the model for which this metabolite is
-        a part to be solved.
-
-        threshold: float
-            a value below which to ignore reaction fluxes
-
-        fva: float (0->1), or None
-            Whether or not to include flux variability analysis in the output.
-            If given, fva should be a float between 0 and 1, representing the
-            fraction of the optimum objective to be searched.
-
-        floatfmt: string
-            format method for floats, passed to tabulate. Default is '.3g'.
-
-        """
-        try:
-            from ..flux_analysis.summary import metabolite_summary
-            return metabolite_summary(self, **kwargs)
-        except ImportError:
-            warn('Summary methods require pandas/tabulate')
-
-elements_and_molecular_weights = {
-    'H':   1.007940,
-    'He':  4.002602,
-    'Li':  6.941000,
-    'Be':  9.012182,
-    'B':   10.811000,
-    'C':   12.010700,
-    'N':   14.006700,
-    'O':   15.999400,
-    'F':   18.998403,
-    'Ne':  20.179700,
-    'Na':  22.989770,
-    'Mg':  24.305000,
-    'Al':  26.981538,
-    'Si':  28.085500,
-    'P':   30.973761,
-    'S':   32.065000,
-    'Cl':  35.453000,
-    'Ar':  39.948000,
-    'K':   39.098300,
-    'Ca':  40.078000,
-    'Sc':  44.955910,
-    'Ti':  47.867000,
-    'V':   50.941500,
-    'Cr':  51.996100,
-    'Mn':  54.938049,
-    'Fe':  55.845000,
-    'Co':  58.933200,
-    'Ni':  58.693400,
-    'Cu':  63.546000,
-    'Zn':  65.409000,
-    'Ga':  69.723000,
-    'Ge':  72.640000,
-    'As':  74.921600,
-    'Se':  78.960000,
-    'Br':  79.904000,
-    'Kr':  83.798000,
-    'Rb':  85.467800,
-    'Sr':  87.620000,
-    'Y':   88.905850,
-    'Zr':  91.224000,
-    'Nb':  92.906380,
-    'Mo':  95.940000,
-    'Tc':  98.000000,
-    'Ru':  101.070000,
-    'Rh':  102.905500,
-    'Pd':  106.420000,
-    'Ag':  107.868200,
-    'Cd':  112.411000,
-    'In':  114.818000,
-    'Sn':  118.710000,
-    'Sb':  121.760000,
-    'Te':  127.600000,
-    'I':   126.904470,
-    'Xe':  131.293000,
-    'Cs':  132.905450,
-    'Ba':  137.327000,
-    'La':  138.905500,
-    'Ce':  140.116000,
-    'Pr':  140.907650,
-    'Nd':  144.240000,
-    'Pm':  145.000000,
-    'Sm':  150.360000,
-    'Eu':  151.964000,
-    'Gd':  157.250000,
-    'Tb':  158.925340,
-    'Dy':  162.500000,
-    'Ho':  164.930320,
-    'Er':  167.259000,
-    'Tm':  168.934210,
-    'Yb':  173.040000,
-    'Lu':  174.967000,
-    'Hf':  178.490000,
-    'Ta':  180.947900,
-    'W':   183.840000,
-    'Re':  186.207000,
-    'Os':  190.230000,
-    'Ir':  192.217000,
-    'Pt':  195.078000,
-    'Au':  196.966550,
-    'Hg':  200.590000,
-    'Tl':  204.383300,
-    'Pb':  207.200000,
-    'Bi':  208.980380,
-    'Po':  209.000000,
-    'At':  210.000000,
-    'Rn':  222.000000,
-    'Fr':  223.000000,
-    'Ra':  226.000000,
-    'Ac':  227.000000,
-    'Th':  232.038100,
-    'Pa':  231.035880,
-    'U':   238.028910,
-    'Np':  237.000000,
-    'Pu':  244.000000,
-    'Am':  243.000000,
-    'Cm':  247.000000,
-    'Bk':  247.000000,
-    'Cf':  251.000000,
-    'Es':  252.000000,
-    'Fm':  257.000000,
-    'Md':  258.000000,
-    'No':  259.000000,
-    'Lr':  262.000000,
-    'Rf':  261.000000,
-    'Db':  262.000000,
-    'Sg':  266.000000,
-    'Bh':  264.000000,
-    'Hs':  277.000000,
-    'Mt':  268.000000,
-    'Ds':  281.000000,
-    'Rg':  272.000000,
-    'Cn':  285.000000,
-    'Uuq': 289.000000,
-    'Uuh': 292.000000
-}
diff --git a/cobra/core/Model.py b/cobra/core/Model.py
deleted file mode 100644
index 388fb3e..0000000
--- a/cobra/core/Model.py
+++ /dev/null
@@ -1,384 +0,0 @@
-from warnings import warn
-from copy import deepcopy, copy
-
-from six import iteritems, string_types
-
-from ..solvers import optimize
-from .Object import Object
-from .Solution import Solution
-from .Reaction import Reaction
-from .DictList import DictList
-
-
-# Note, when a reaction is added to the Model it will no longer keep personal
-# instances of its Metabolites, it will reference Model.metabolites to improve
-# performance.  When doing this, take care to monitor metabolite coefficients.
-# Do the same for Model.reactions[:].genes and Model.genes
-
-class Model(Object):
-    """Metabolic Model
-
-    Refers to Metabolite, Reaction, and Gene Objects.
-    """
-
-    def __setstate__(self, state):
-        """Make sure all cobra.Objects in the model point to the model"""
-        self.__dict__.update(state)
-        for y in ['reactions', 'genes', 'metabolites']:
-            for x in getattr(self, y):
-                x._model = self
-        if not hasattr(self, "name"):
-            self.name = None
-
-    def __init__(self, id_or_model=None, name=None):
-        if isinstance(id_or_model, Model):
-            Object.__init__(self, name=name)
-            self.__setstate__(id_or_model.__dict__)
-            if not hasattr(self, "name"):
-                self.name = None
-        else:
-            Object.__init__(self, id_or_model, name=name)
-            self._trimmed = False
-            self._trimmed_genes = []
-            self._trimmed_reactions = {}
-            self.genes = DictList()
-            self.reactions = DictList()  # A list of cobra.Reactions
-            self.metabolites = DictList()  # A list of cobra.Metabolites
-            # genes based on their ids {Gene.id: Gene}
-            self.compartments = {}
-            self.solution = Solution(None)
-            self.media_compositions = {}
-
-    @property
-    def description(self):
-        warn("description deprecated")
-        return self.name if self.name is not None else ""
-
-    @description.setter
-    def description(self, value):
-        self.name = value
-        warn("description deprecated")
-
-    def __add__(self, other_model):
-        """Adds two models. +
-
-        The issue of reactions being able to exists in multiple Models now
-        arises, the same for metabolites and such.  This might be a little
-        difficult as a reaction with the same name / id in two models might
-        have different coefficients for their metabolites due to pH and whatnot
-        making them different reactions.
-
-        """
-        new_model = self.copy()
-        new_reactions = deepcopy(other_model.reactions)
-        new_model.add_reactions(new_reactions)
-        new_model.id = self.id + '_' + other_model.id
-        return new_model
-
-    def __iadd__(self, other_model):
-        """Adds a Model to this model +=
-
-        The issue of reactions being able to exists in multiple Models now
-        arises, the same for metabolites and such.  This might be a little
-        difficult as a reaction with the same name / id in two models might
-        have different coefficients for their metabolites due to pH and whatnot
-        making them different reactions.
-
-        """
-        new_reactions = deepcopy(other_model.reactions)
-        self.add_reactions(new_reactions)
-        self.id = self.id + '_' + other_model.id
-        return self
-
-    def copy(self):
-        """Provides a partial 'deepcopy' of the Model.  All of the Metabolite,
-        Gene, and Reaction objects are created anew but in a faster fashion
-        than deepcopy
-        """
-        new = self.__class__()
-        do_not_copy_by_ref = {"metabolites", "reactions", "genes", "notes",
-                              "annotation"}
-        for attr in self.__dict__:
-            if attr not in do_not_copy_by_ref:
-                new.__dict__[attr] = self.__dict__[attr]
-        new.notes = deepcopy(self.notes)
-        new.annotation = deepcopy(self.annotation)
-
-        new.metabolites = DictList()
-        do_not_copy_by_ref = {"_reaction", "_model"}
-        for metabolite in self.metabolites:
-            new_met = metabolite.__class__()
-            for attr, value in iteritems(metabolite.__dict__):
-                if attr not in do_not_copy_by_ref:
-                    new_met.__dict__[attr] = copy(
-                        value) if attr == "formula" else value
-            new_met._model = new
-            new.metabolites.append(new_met)
-
-        new.genes = DictList()
-        for gene in self.genes:
-            new_gene = gene.__class__(None)
-            for attr, value in iteritems(gene.__dict__):
-                if attr not in do_not_copy_by_ref:
-                    new_gene.__dict__[attr] = copy(
-                        value) if attr == "formula" else value
-            new_gene._model = new
-            new.genes.append(new_gene)
-
-        new.reactions = DictList()
-        do_not_copy_by_ref = {"_model", "_metabolites", "_genes"}
-        for reaction in self.reactions:
-            new_reaction = reaction.__class__()
-            for attr, value in iteritems(reaction.__dict__):
-                if attr not in do_not_copy_by_ref:
-                    new_reaction.__dict__[attr] = copy(value)
-            new_reaction._model = new
-            new.reactions.append(new_reaction)
-            # update awareness
-            for metabolite, stoic in iteritems(reaction._metabolites):
-                new_met = new.metabolites.get_by_id(metabolite.id)
-                new_reaction._metabolites[new_met] = stoic
-                new_met._reaction.add(new_reaction)
-            for gene in reaction._genes:
-                new_gene = new.genes.get_by_id(gene.id)
-                new_reaction._genes.add(new_gene)
-                new_gene._reaction.add(new_reaction)
-        return new
-
-    def add_metabolites(self, metabolite_list):
-        """Will add a list of metabolites to the the object, if they do not
-        exist and then expand the stochiometric matrix
-
-        metabolite_list: A list of :class:`~cobra.core.Metabolite` objects
-
-        """
-        if not hasattr(metabolite_list, '__iter__'):
-            metabolite_list = [metabolite_list]
-        # First check whether the metabolites exist in the model
-        metabolite_list = [x for x in metabolite_list
-                           if x.id not in self.metabolites]
-        for x in metabolite_list:
-            x._model = self
-        self.metabolites += metabolite_list
-
-    def add_reaction(self, reaction):
-        """Will add a cobra.Reaction object to the model, if
-        reaction.id is not in self.reactions.
-
-        reaction: A :class:`~cobra.core.Reaction` object
-
-        """
-        self.add_reactions([reaction])
-
-    def add_reactions(self, reaction_list):
-        """Will add a cobra.Reaction object to the model, if
-        reaction.id is not in self.reactions.
-
-        reaction_list: A list of :class:`~cobra.core.Reaction` objects
-
-        """
-
-        try:
-            reaction_list = DictList(reaction_list)
-        except TypeError:
-            # This function really should not used for single reactions
-            reaction_list = DictList([reaction_list])
-            warn("Use add_reaction for single reactions")
-
-        # Only add the reaction if one with the same ID is not already
-        # present in the model.
-        reactions_in_model = [
-            i.id for i in reaction_list if self.reactions.has_id(
-                i.id)]
-
-        if len(reactions_in_model) > 0:
-            raise Exception("Reactions already in the model: " +
-                            ", ".join(reactions_in_model))
-
-        # Add reactions. Also take care of genes and metabolites in the loop
-        for reaction in reaction_list:
-            reaction._model = self  # the reaction now points to the model
-            # keys() is necessary because the dict will be modified during
-            # the loop
-            for metabolite in list(reaction._metabolites.keys()):
-                # if the metabolite is not in the model, add it
-                # should we be adding a copy instead.
-                if not self.metabolites.has_id(metabolite.id):
-                    self.metabolites.append(metabolite)
-                    metabolite._model = self
-                    # this should already be the case. Is it necessary?
-                    metabolite._reaction = set([reaction])
-                # A copy of the metabolite exists in the model, the reaction
-                # needs to point to the metabolite in the model.
-                else:
-                    stoichiometry = reaction._metabolites.pop(metabolite)
-                    model_metabolite = self.metabolites.get_by_id(
-                        metabolite.id)
-                    reaction._metabolites[model_metabolite] = stoichiometry
-                    model_metabolite._reaction.add(reaction)
-
-            for gene in list(reaction._genes):
-                # If the gene is not in the model, add it
-                if not self.genes.has_id(gene.id):
-                    self.genes.append(gene)
-                    gene._model = self
-                    # this should already be the case. Is it necessary?
-                    gene._reaction = set([reaction])
-                # Otherwise, make the gene point to the one in the model
-                else:
-                    model_gene = self.genes.get_by_id(gene.id)
-                    if model_gene is not gene:
-                        reaction._dissociate_gene(gene)
-                        reaction._associate_gene(model_gene)
-
-        self.reactions += reaction_list
-
-    def to_array_based_model(self, deepcopy_model=False, **kwargs):
-        """Makes a :class:`~cobra.core.ArrayBasedModel` from a cobra.Model which
-        may be used to perform linear algebra operations with the
-        stoichiomatric matrix.
-
-        deepcopy_model: Boolean.  If False then the ArrayBasedModel points
-        to the Model
-
-        """
-        from .ArrayBasedModel import ArrayBasedModel
-        return ArrayBasedModel(self, deepcopy_model=deepcopy_model, **kwargs)
-
-    def optimize(self, objective_sense='maximize', **kwargs):
-        r"""Optimize model using flux balance analysis
-
-        objective_sense: 'maximize' or 'minimize'
-
-        solver: 'glpk', 'cglpk', 'gurobi', 'cplex' or None
-
-        quadratic_component: None or :class:`scipy.sparse.dok_matrix`
-            The dimensions should be (n, n) where n is the number of reactions.
-
-            This sets the quadratic component (Q) of the objective coefficient,
-            adding :math:`\\frac{1}{2} v^T \cdot Q \cdot v` to the objective.
-
-        tolerance_feasibility: Solver tolerance for feasibility.
-
-        tolerance_markowitz: Solver threshold during pivot
-
-        time_limit: Maximum solver time (in seconds)
-
-        .. NOTE :: Only the most commonly used parameters are presented here.
-                   Additional parameters for cobra.solvers may be available and
-                   specified with the appropriate keyword argument.
-
-        """
-        solution = optimize(self, objective_sense=objective_sense, **kwargs)
-        self.solution = solution
-        return solution
-
-    def remove_reactions(self, reactions, delete=True,
-                         remove_orphans=False):
-        """remove reactions from the model
-
-        reactions: [:class:`~cobra.core.Reaction.Reaction`] or [str]
-            The reactions (or their id's) to remove
-
-        delete: Boolean
-            Whether or not the reactions should be deleted after removal.
-            If the reactions are not deleted, those objects will be
-            recreated with new metabolite and gene objects.
-
-        remove_orphans: Boolean
-            Remove orphaned genes and metabolites from the model as well
-
-        """
-        if isinstance(reactions, string_types) or hasattr(reactions, "id"):
-            warn("need to pass in a list")
-            reactions = [reactions]
-        for reaction in reactions:
-            try:
-                reaction = self.reactions[self.reactions.index(reaction)]
-            except ValueError:
-                warn('%s not in %s' % (reaction, self))
-            else:
-                if delete:
-                    reaction.delete(remove_orphans=remove_orphans)
-                else:
-                    reaction.remove_from_model(remove_orphans=remove_orphans)
-
-    def repair(self, rebuild_index=True, rebuild_relationships=True):
-        """Update all indexes and pointers in a model"""
-        if rebuild_index:  # DictList indexes
-            self.reactions._generate_index()
-            self.metabolites._generate_index()
-            self.genes._generate_index()
-        if rebuild_relationships:
-            for met in self.metabolites:
-                met._reaction.clear()
-            for gene in self.genes:
-                gene._reaction.clear()
-            for rxn in self.reactions:
-                for met in rxn._metabolites:
-                    met._reaction.add(rxn)
-                for gene in rxn._genes:
-                    gene._reaction.add(rxn)
-        # point _model to self
-        for l in (self.reactions, self.genes, self.metabolites):
-            for e in l:
-                e._model = self
-        if self.solution is None:
-            self.solution = Solution(None)
-        return
-
-    def change_objective(self, objectives):
-        """Change the model objective"""
-        self.objective = objectives
-
-    @property
-    def objective(self):
-        return {reaction: reaction.objective_coefficient
-                for reaction in self.reactions
-                if reaction.objective_coefficient != 0}
-
-    @objective.setter
-    def objective(self, objectives):
-        # set all objective coefficients to 0 initially
-        for x in self.reactions:
-            x.objective_coefficient = 0.
-        # case of a single reaction
-        if isinstance(objectives, string_types) or \
-                isinstance(objectives, Reaction):
-            self.reactions.get_by_id(str(objectives)).objective_coefficient = 1
-        elif isinstance(objectives, int):
-            self.reactions[objectives].objective_coefficient = 1
-
-        # case of an iterable
-        else:
-            for reaction_id in objectives:
-                if isinstance(reaction_id, int):  # index in a list
-                    reaction = self.reactions[reaction_id]
-                else:
-                    reaction = self.reactions.get_by_id(str(reaction_id))
-                # objective coefficient obtained from a dict, and is 1. if
-                # from a list.
-                reaction.objective_coefficient = objectives[reaction_id] \
-                    if hasattr(objectives, "items") else 1.
-
-    def summary(self, **kwargs):
-        """Print a summary of the input and output fluxes of the model. This
-        method requires the model to have been previously solved.
-
-        threshold: float
-            tolerance for determining if a flux is zero (not printed)
-
-        fva: int or None
-            Whether or not to calculate and report flux variability in the
-            output summary
-
-        floatfmt: string
-            format method for floats, passed to tabulate. Default is '.3g'.
-
-        """
-
-        try:
-            from ..flux_analysis.summary import model_summary
-            return model_summary(self, **kwargs)
-        except ImportError:
-            warn('Summary methods require pandas/tabulate')
diff --git a/cobra/core/Object.py b/cobra/core/Object.py
deleted file mode 100644
index ab23ed3..0000000
--- a/cobra/core/Object.py
+++ /dev/null
@@ -1,26 +0,0 @@
-class Object(object):
-    """Defines common behavior of object in cobra.core"""
-
-    def __init__(self, id=None, name=""):
-        """
-        id: None or a string
-
-        """
-        self.id = id
-        self.name = name
-
-        self.notes = {}
-        self.annotation = {}
-
-    def __getstate__(self):
-        """To prevent excessive replication during deepcopy."""
-        state = self.__dict__.copy()
-        if '_model' in state:
-            state['_model'] = None
-        return state
-
-    def __repr__(self):
-        return "<%s %s at 0x%x>" % (self.__class__.__name__, self.id, id(self))
-
-    def __str__(self):
-        return str(self.id)
diff --git a/cobra/core/Reaction.py b/cobra/core/Reaction.py
deleted file mode 100644
index d500c82..0000000
--- a/cobra/core/Reaction.py
+++ /dev/null
@@ -1,708 +0,0 @@
-from __future__ import print_function
-
-from collections import defaultdict
-import re
-from copy import copy, deepcopy
-from warnings import warn
-
-from six import string_types, iteritems
-
-from .Object import Object
-from .Gene import Gene, parse_gpr, ast2str
-from .Metabolite import Metabolite
-
-
-class Frozendict(dict):
-    """Read-only dictionary view"""
-
-    def __setitem__(self, key, value):
-        raise NotImplementedError("read-only")
-
-    def __delitem__(self, key):
-        raise NotImplementedError("read-only")
-
-    def pop(self, key, value):
-        raise NotImplementedError("read-only")
-
-    def popitem(self):
-        raise NotImplementedError("read-only")
-
-
-def _is_positive(n):
-    try:
-        if n >= 0:
-            return True
-        else:
-            return False
-    except:
-        return True
-
-# precompiled regular expressions
-# Matches and/or in a gene reaction rule
-and_or_search = re.compile(r'\(| and| or|\+|\)', re.IGNORECASE)
-uppercase_AND = re.compile(r'\bAND\b')
-uppercase_OR = re.compile(r'\bOR\b')
-gpr_clean = re.compile(' {2,}')
-# This regular expression finds any single letter compartment enclosed in
-# square brackets at the beginning of the string. For example [c] : foo --> bar
-compartment_finder = re.compile("^\s*(\[[A-Za-z]\])\s*:*")
-# Regular expressions to match the arrows
-_reversible_arrow_finder = re.compile("<(-+|=+)>")
-_forward_arrow_finder = re.compile("(-+|=+)>")
-_reverse_arrow_finder = re.compile("<(-+|=+)")
-
-
-class Reaction(Object):
-    """Reaction is a class for holding information regarding
-    a biochemical reaction in a cobra.Model object
-
-    """
-
-    def __init__(self, id=None, name='', subsystem='', lower_bound=0.,
-                 upper_bound=1000., objective_coefficient=0.):
-        """An object for housing reactions and associated information
-        for cobra modeling.
-
-        """
-        Object.__init__(self, id, name)
-        self._gene_reaction_rule = ''
-        self.subsystem = subsystem
-        # The cobra.Genes that are used to catalyze the reaction
-        self._genes = set()
-        # A dictionary of metabolites and their stoichiometric coefficients in
-        # this reaction.
-        self._metabolites = {}
-        # self.model is None or refers to the cobra.Model that
-        # contains self
-        self._model = None
-
-        self.objective_coefficient = objective_coefficient
-        self.upper_bound = upper_bound
-        self.lower_bound = lower_bound
-        # Used during optimization.  Indicates whether the
-        # variable is modeled as continuous, integer, binary, semicontinous, or
-        # semiinteger.
-        self.variable_kind = 'continuous'
-
-    # read-only
-    @property
-    def metabolites(self):
-        return Frozendict(self._metabolites)
-
-    @property
-    def genes(self):
-        return frozenset(self._genes)
-
-    @property
-    def gene_reaction_rule(self):
-        return self._gene_reaction_rule
-
-    @gene_reaction_rule.setter
-    def gene_reaction_rule(self, new_rule):
-        self._gene_reaction_rule = new_rule.strip()
-        try:
-            _, gene_names = parse_gpr(self._gene_reaction_rule)
-        except (SyntaxError, TypeError) as e:
-            if "AND" in new_rule or "OR" in new_rule:
-                warn("uppercase AND/OR found in rule '%s' for '%s'" %
-                     (new_rule, repr(self)))
-                new_rule = uppercase_AND.sub("and", new_rule)
-                new_rule = uppercase_OR.sub("or", new_rule)
-                self.gene_reaction_rule = new_rule
-                return
-            warn("malformed gene_reaction_rule '%s' for %s" %
-                 (new_rule, repr(self)))
-            tmp_str = and_or_search.sub('', self._gene_reaction_rule)
-            gene_names = set((gpr_clean.sub(' ', tmp_str).split(' ')))
-        if '' in gene_names:
-            gene_names.remove('')
-        old_genes = self._genes
-        if self._model is None:
-            self._genes = {Gene(i) for i in gene_names}
-        else:
-            model_genes = self._model.genes
-            self._genes = set()
-            for id in gene_names:
-                if model_genes.has_id(id):
-                    self._genes.add(model_genes.get_by_id(id))
-                else:
-                    new_gene = Gene(id)
-                    new_gene._model = self._model
-                    self._genes.add(new_gene)
-                    model_genes.append(new_gene)
-
-        # Make the genes aware that it is involved in this reaction
-        for g in self._genes:
-            g._reaction.add(self)
-
-        # make the old genes aware they are no longer involved in this reaction
-        for g in old_genes:
-            if g not in self._genes:  # if an old gene is not a new gene
-                try:
-                    g._reaction.remove(self)
-                except:
-                    warn("could not remove old gene %s from reaction %s" %
-                         (g.id, self.id))
-
-    @property
-    def gene_name_reaction_rule(self):
-        """Display gene_reaction_rule with names intead.
-
-        Do NOT use this string for computation. It is intended to give a
-        representation of the rule using more familiar gene names instead of
-        the often cryptic ids.
-
-        """
-        names = {i.id: i.name for i in self._genes}
-        ast = parse_gpr(self._gene_reaction_rule)[0]
-        return ast2str(ast, names=names)
-
-    @property
-    def x(self):
-        """The flux through the reaction in the most recent solution
-
-        Flux values are computed from the primal values of the variables in
-        the solution.
-
-        """
-        try:
-            return self._model.solution.x_dict[self.id]
-        except Exception as e:
-            if self._model is None:
-                raise Exception("not part of a model")
-            if not hasattr(self._model, "solution") or \
-                    self._model.solution is None or \
-                    self._model.solution.status == "NA":
-                raise Exception("model has not been solved")
-            if self._model.solution.status != "optimal":
-                raise Exception("model solution was not optimal")
-            raise e  # Not sure what the exact problem was
-
-    @property
-    def bounds(self):
-        """ A more convienient bounds structure than seperate upper and lower
-        bounds """
-
-        return (self.lower_bound, self.upper_bound)
-
-    @bounds.setter
-    def bounds(self, value):
-        """ Set the bounds directly from a tuple """
-
-        self.lower_bound = value[0]
-        self.upper_bound = value[1]
-
-    @property
-    def reversibility(self):
-        """Whether the reaction can proceed in both directions (reversible)
-
-        This is computed from the current upper and lower bounds.
-
-        """
-        return self.lower_bound < 0 and self.upper_bound > 0
-
-    @reversibility.setter
-    def reversibility(self, value):
-        warn("Setting reaction reversibility is ignored")
-
-    @property
-    def boundary(self):
-        # single metabolite implies it must be a boundary
-        if len(self._metabolites) == 1:
-            return "system_boundary"
-        # if there is more than one metabolite, if it ONLY produces or ONLY
-        # consumes, it is also a boundary.
-        all_stoichiometry = self._metabolites.values()
-        if not min(all_stoichiometry) < 0 < max(all_stoichiometry):
-            return "system_boundary"
-        return None
-
-    @property
-    def model(self):
-        """returns the model the reaction is a part of"""
-        return self._model
-
-    def _update_awareness(self):
-        """Make sure all metabolites and genes that are associated with
-        this reaction are aware of it.
-
-        """
-        for x in self._metabolites:
-            x._reaction.add(self)
-        for x in self._genes:
-            x._reaction.add(self)
-
-    def remove_from_model(self, model=None, remove_orphans=False):
-        """Removes the reaction from the model while keeping it intact
-
-        remove_orphans: Boolean
-            Remove orphaned genes and metabolites from the model as well
-
-        model: deprecated argument, must be None
-
-        """
-        if model is not None:
-            warn("model does not need to be passed into remove_from_model")
-            if model != self._model:
-                raise Exception("Can not remove from a different model")
-        if self._model is None:
-            raise Exception("Reaction %s not in a model" % self.id)
-        # preserve the original attributes (but as copies)
-        model = self._model
-        new_metabolites = {copy(met): value
-                           for met, value in iteritems(self._metabolites)}
-        new_genes = {copy(i) for i in self._genes}
-        # Begin removing from the model
-        self._model = None
-        model.reactions.remove(self)
-        for x in self._metabolites:
-            x._reaction.remove(self)
-            if remove_orphans and len(x._reaction) == 0:
-                model.metabolites.remove(x)
-        for x in self._genes:
-            x._reaction.remove(self)
-            if remove_orphans and len(x._reaction) == 0:
-                model.genes.remove(x)
-        # Rebuild the model with the new independent genes/metabolites
-        self._metabolites = {}
-        self.add_metabolites(new_metabolites)
-        self._genes = set()
-        for k in new_genes:
-            self._associate_gene(k)
-
-    def delete(self, remove_orphans=False):
-        """Completely delete a reaction
-
-        This removes all associations between a reaction the associated
-        model, metabolites and genes (unlike remove_from_model which only
-        dissociates the reaction from the model).
-
-        remove_orphans: Boolean
-            Remove orphaned genes and metabolites from the model as well
-
-        """
-        model = self._model
-        if model is not None:
-            self._model.reactions.remove(self)
-        elif remove_orphans:
-            # can't remove orphans if not part of a model
-            remove_orphans = False
-        self._model = None
-        for x in self._metabolites:
-            if self in x._reaction:
-                x._reaction.remove(self)
-                if remove_orphans and len(x._reaction) == 0:
-                    model.metabolites.remove(x)
-        for x in self._genes:
-            if self in x._reaction:
-                x._reaction.remove(self)
-                if remove_orphans and len(x._reaction) == 0:
-                    model.genes.remove(x)
-        self._metabolites = {}
-        self._genes = set()
-
-    def __setstate__(self, state):
-        """Probably not necessary to set _model as the cobra.Model that
-        contains self sets the _model attribute for all metabolites and genes
-        in the reaction.
-
-        However, to increase performance speed we do want to let the metabolite
-        and gene know that they are employed in this reaction
-
-        """
-        # These are necessary for old pickles which store attributes
-        # which have since been superceded by properties.
-        if "reaction" in state:
-            state.pop("reaction")
-        if "gene_reaction_rule" in state:
-            state["_gene_reaction_rule"] = state.pop("gene_reaction_rule")
-
-        self.__dict__.update(state)
-        for x in state['_metabolites']:
-            setattr(x, '_model', self._model)
-            x._reaction.add(self)
-        for x in state['_genes']:
-            setattr(x, '_model', self._model)
-            x._reaction.add(self)
-
-    def copy(self):
-        """Copy a reaction
-
-        The referenced metabolites and genes are also copied.
-
-        """
-        # no references to model when copying
-        model = self._model
-        self._model = None
-        for i in self._metabolites:
-            i._model = None
-        for i in self._genes:
-            i._model = None
-        # now we can copy
-        new_reaction = deepcopy(self)
-        # restore the references
-        self._model = model
-        for i in self._metabolites:
-            i._model = model
-        for i in self._genes:
-            i._model = model
-        return new_reaction
-
-    def pop(self, metabolite_id):
-        """Remove a metabolite from the reaction and return the
-        stoichiometric coefficient.
-
-        metabolite_id: str or :class:`~cobra.core.Metabolite.Metabolite`
-
-        """
-        the_metabolite = metabolite_id
-        if isinstance(the_metabolite, string_types):
-            found_match = None
-            for possible_match in self._metabolites:
-                if possible_match.id == the_metabolite:
-                    found_match = possible_match
-                    break
-            if found_match is None:
-                raise KeyError(
-                    "No metabolite named %s in the reaction" % the_metabolite)
-            else:
-                the_metabolite = found_match
-        the_coefficient = self._metabolites.pop(the_metabolite)
-        the_metabolite._reaction.remove(self)
-        return the_coefficient
-
-    def __add__(self, other):
-        """Add two reactions
-
-        The stoichiometry will be the combined stoichiometry of the two
-        reactions, and the gene reaction rule will be both rules combined by an
-        and. All other attributes (i.e. reaction bounds) will match those of
-        the first reaction
-
-        """
-        new_reaction = self.copy()
-        new_reaction += other
-        return new_reaction
-
-    def __iadd__(self, other):
-        self.add_metabolites(other._metabolites, combine=True)
-        gpr1 = self.gene_reaction_rule.strip()
-        gpr2 = other.gene_reaction_rule.strip()
-        if gpr1 != '' and gpr2 != '':
-            self.gene_reaction_rule = "(%s) and (%s)" % \
-                (self.gene_reaction_rule, other.gene_reaction_rule)
-        elif gpr1 != '' and gpr2 == '':
-            self.gene_reaction_rule = gpr1
-        elif gpr1 == '' and gpr2 != '':
-            self.gene_reaction_rule = gpr2
-        return self
-
-    def __sub__(self, other):
-        new = self.copy()
-        new -= other
-        return new
-
-    def __isub__(self, other):
-        self.subtract_metabolites(other._metabolites, combine=True)
-        return self
-
-    def __imul__(self, coefficient):
-        """Scale coefficients in a reaction"""
-        self._metabolites = {k: coefficient * v for k, v in
-                             iteritems(self._metabolites)}
-        return self
-
-    def __mul__(self, coefficient):
-        new = self.copy()
-        new *= coefficient
-        return new
-
-    @property
-    def reactants(self):
-        """Return a list of reactants for the reaction."""
-        return [k for k, v in self._metabolites.items() if not _is_positive(v)]
-
-    @property
-    def products(self):
-        """Return a list of products for the reaction"""
-        return [k for k, v in self._metabolites.items() if _is_positive(v)]
-
-    def get_coefficient(self, metabolite_id):
-        """Return the stoichiometric coefficient for a metabolite in
-        the reaction.
-
-        metabolite_id: str or :class:`~cobra.core.Metabolite.Metabolite`
-
-        """
-        _id_to_metabolites = dict([(x.id, x)
-                                   for x in self._metabolites])
-
-        if hasattr(metabolite_id, 'id'):
-            metabolite_id = metabolite_id.id
-        return self._metabolites[_id_to_metabolites[metabolite_id]]
-
-    def get_coefficients(self, metabolite_ids):
-        """Return the stoichiometric coefficients for a list of
-        metabolites in the reaction.
-
-        metabolite_ids: iterable
-            Containing str or :class:`~cobra.core.Metabolite.Metabolite`
-
-        """
-        return map(self.get_coefficient, metabolite_ids)
-
-    def add_metabolites(self, metabolites, combine=True,
-                        add_to_container_model=True):
-        """Add metabolites and stoichiometric coefficients to the reaction.
-        If the final coefficient for a metabolite is 0 then it is removed
-        from the reaction.
-
-        metabolites: dict
-            {str or :class:`~cobra.core.Metabolite.Metabolite`: coefficient}
-
-        combine: Boolean.
-            Describes behavior a metabolite already exists in the reaction.
-            True causes the coefficients to be added.
-            False causes the coefficient to be replaced.
-            True and a metabolite already exists in the
-
-        add_to_container_model: Boolean.
-            Add the metabolite to the :class:`~cobra.core.Model.Model`
-            the reaction is associated with (i.e. self.model)
-
-        """
-        _id_to_metabolites = {str(x): x for x in self._metabolites}
-        new_metabolites = []
-        for metabolite, coefficient in iteritems(metabolites):
-            met_id = str(metabolite)
-            # If a metabolite already exists in the reaction then
-            # just add them.
-            if met_id in _id_to_metabolites:
-                reaction_metabolite = _id_to_metabolites[met_id]
-                if combine:
-                    self._metabolites[reaction_metabolite] += coefficient
-                else:
-                    self._metabolites[reaction_metabolite] = coefficient
-            else:
-                # If the reaction is in a model, ensure we aren't using
-                # a duplicate metabolite.
-                if self._model:
-                    try:
-                        metabolite = \
-                            self._model.metabolites.get_by_id(met_id)
-                    except KeyError as e:
-                        if isinstance(metabolite, Metabolite):
-                            new_metabolites.append(metabolite)
-                        else:
-                            # do we want to handle creation here?
-                            raise e
-                elif isinstance(metabolite, string_types):
-                    # if we want to handle creation, this should be changed
-                    raise ValueError("reaction '%s' does not belong to a model"
-                                     % self.id)
-                self._metabolites[metabolite] = coefficient
-                # make the metabolite aware that it is involved in this
-                # reaction
-                metabolite._reaction.add(self)
-        for metabolite, the_coefficient in list(self._metabolites.items()):
-            if the_coefficient == 0:
-                # make the metabolite aware that it no longer participates
-                # in this reaction
-                metabolite._reaction.remove(self)
-                self._metabolites.pop(metabolite)
-        if add_to_container_model and hasattr(self._model, 'add_metabolites'):
-            self._model.add_metabolites(new_metabolites)
-
-    def subtract_metabolites(self, metabolites, combine=True):
-        """This function will 'subtract' metabolites from a reaction, which
-        means add the metabolites with -1*coefficient. If the final coefficient
-        for a metabolite is 0 then the metabolite is removed from the reaction.
-
-        metabolites: dict of {:class:`~cobra.core.Metabolite`: coefficient}
-            These metabolites will be added to the reaction
-
-        .. note:: A final coefficient < 0 implies a reactant.
-
-        """
-        self.add_metabolites({k: -v for k, v in iteritems(metabolites)},
-                             combine=combine)
-
-    def clear_metabolites(self):
-        """Remove all metabolites from the reaction"""
-        for metabolite in list(self._metabolites.keys()):
-            self.pop(metabolite)
-
-    @property
-    def reaction(self):
-        """Human readable reaction string"""
-        return self.build_reaction_string()
-
-    @reaction.setter
-    def reaction(self, value):
-        return self.build_reaction_from_string(value)
-
-    def build_reaction_string(self, use_metabolite_names=False):
-        """Generate a human readable reaction string"""
-        def format(number):
-            return "" if number == 1 else str(number).rstrip(".") + " "
-        id_type = 'id'
-        if use_metabolite_names:
-            id_type = 'name'
-        reactant_bits = []
-        product_bits = []
-        for the_metabolite, coefficient in sorted(
-                iteritems(self._metabolites), key=lambda x: x[0].id):
-            name = str(getattr(the_metabolite, id_type))
-            if _is_positive(coefficient):
-                product_bits.append(format(coefficient) + name)
-            else:
-                reactant_bits.append(format(abs(coefficient)) + name)
-
-        reaction_string = ' + '.join(reactant_bits)
-        if not self.reversibility:
-            if self.lower_bound < 0 and self.upper_bound <= 0:
-                reaction_string += ' <-- '
-            else:
-                reaction_string += ' --> '
-        else:
-            reaction_string += ' <=> '
-        reaction_string += ' + '.join(product_bits)
-        return reaction_string
-
-    def check_mass_balance(self):
-        """Compute mass and charge balance for the reaction
-
-        returns a dict of {element: amount} for unbalanced elements.
-        "charge" is treated as an element in this dict
-        This should be empty for balanced reactions.
-        """
-        reaction_element_dict = defaultdict(int)
-        for metabolite, coefficient in self._metabolites.items():
-            if metabolite.charge is not None:
-                reaction_element_dict["charge"] += \
-                    coefficient * metabolite.charge
-            if metabolite.elements is None:
-                raise ValueError("No elements found in metabolite %s"
-                                 % metabolite.id)
-            for element, amount in iteritems(metabolite.elements):
-                reaction_element_dict[element] += coefficient * amount
-        # filter out 0 values
-        return {k: v for k, v in iteritems(reaction_element_dict) if v != 0}
-
-    def get_compartments(self):
-        """lists compartments the metabolites are in"""
-        return list({x.compartment for x in self._metabolites})
-
-    def _associate_gene(self, cobra_gene):
-        """Associates a cobra.Gene object with a cobra.Reaction.
-
-        cobra_gene : :class:`~cobra.core.Gene.Gene`
-
-        """
-        self._genes.add(cobra_gene)
-        cobra_gene._reaction.add(self)
-        cobra_gene._model = self._model
-
-    def _dissociate_gene(self, cobra_gene):
-        """Dissociates a cobra.Gene object with a cobra.Reaction.
-
-        cobra_gene : :class:`~cobra.core.Gene.Gene`
-
-        """
-        self._genes.discard(cobra_gene)
-        cobra_gene._reaction.discard(self)
-
-    def knock_out(self):
-        """Change the upper and lower bounds of the reaction to 0."""
-        self.lower_bound = 0
-        self.upper_bound = 0
-
-    def build_reaction_from_string(self, reaction_str, verbose=True,
-                                   fwd_arrow=None, rev_arrow=None,
-                                   reversible_arrow=None, term_split="+"):
-        """Builds reaction from reaction equation reaction_str using parser
-
-        Takes a string and using the specifications supplied in the optional
-        arguments infers a set of metabolites, metabolite compartments and
-        stoichiometries for the reaction.  It also infers the reversibility
-        of the reaction from the reaction arrow.
-
-        Args:
-            reaction_str: a string containing a reaction formula (equation)
-            verbose: Boolean setting verbosity of function
-                (optional, default=True)
-            fwd_arrow: re.compile for forward irreversible reaction arrows
-                (optional, default=_forward_arrow_finder)
-            reverse_arrow: re.compile for backward irreversible reaction arrows
-                (optional, default=_reverse_arrow_finder)
-            fwd_arrow: re.compile for reversible reaction arrows
-                (optional, default=_reversible_arrow_finder)
-            term_split: String dividing individual metabolite entries
-                (optional, default='+')
-        """
-        # set the arrows
-        forward_arrow_finder = _forward_arrow_finder if fwd_arrow is None \
-            else re.compile(re.escape(fwd_arrow))
-        reverse_arrow_finder = _reverse_arrow_finder if rev_arrow is None \
-            else re.compile(re.escape(rev_arrow))
-        reversible_arrow_finder = _reversible_arrow_finder \
-            if reversible_arrow is None \
-            else re.compile(re.escape(reversible_arrow))
-        if self._model is None:
-            warn("no model found")
-            model = None
-        else:
-            model = self._model
-        found_compartments = compartment_finder.findall(reaction_str)
-        if len(found_compartments) == 1:
-            compartment = found_compartments[0]
-            reaction_str = compartment_finder.sub("", reaction_str)
-        else:
-            compartment = ""
-
-        # reversible case
-        arrow_match = reversible_arrow_finder.search(reaction_str)
-        if arrow_match is not None:
-            self.lower_bound = -1000
-            self.upper_bound = 1000
-        else:  # irreversible
-            # try forward
-            arrow_match = forward_arrow_finder.search(reaction_str)
-            if arrow_match is not None:
-                self.upper_bound = 1000
-                self.lower_bound = 0
-            else:
-                # must be reverse
-                arrow_match = reverse_arrow_finder.search(reaction_str)
-                if arrow_match is None:
-                    raise ValueError("no suitable arrow found in '%s'" %
-                                     reaction_str)
-                else:
-                    self.upper_bound = 0
-                    self.lower_bound = -1000
-        reactant_str = reaction_str[:arrow_match.start()].strip()
-        product_str = reaction_str[arrow_match.end():].strip()
-
-        self.clear_metabolites()
-
-        for substr, factor in ((reactant_str, -1), (product_str, 1)):
-            if len(substr) == 0:
-                continue
-            for term in substr.split(term_split):
-                term = term.strip()
-                if term.lower() == "nothing":
-                    continue
-                if " " in term:
-                    num_str, met_id = term.split()
-                    num = float(num_str.lstrip("(").rstrip(")")) * factor
-                else:
-                    met_id = term
-                    num = factor
-                met_id += compartment
-                try:
-                    met = model.metabolites.get_by_id(met_id)
-                except KeyError:
-                    if verbose:
-                        print("unknown metabolite '%s' created" % met_id)
-                    met = Metabolite(met_id)
-                self.add_metabolites({met: num})
diff --git a/cobra/core/Solution.py b/cobra/core/Solution.py
deleted file mode 100644
index c7ca72f..0000000
--- a/cobra/core/Solution.py
+++ /dev/null
@@ -1,39 +0,0 @@
-class Solution(object):
-    """Stores the solution from optimizing a cobra.Model. This is
-    used to provide a single interface to results from different
-    solvers that store their values in different ways.
-
-    f: The objective value
-
-    solver: A string indicating which solver package was used.
-
-    x: List or Array of the values from the primal.
-
-    x_dict: A dictionary of reaction ids that maps to the primal values.
-
-    y: List or Array of the values from the dual.
-
-    y_dict: A dictionary of reaction ids that maps to the dual values.
-
-    """
-
-    def __init__(self, f, x=None,
-                 x_dict=None, y=None, y_dict=None,
-                 solver=None, the_time=0, status='NA'):
-        self.solver = solver
-        self.f = f
-        self.x = x
-        self.x_dict = x_dict
-        self.status = status
-        self.y = y
-        self.y_dict = y_dict
-
-    def dress_results(self, model):
-        """.. warning :: deprecated"""
-        from warnings import warn
-        warn("unnecessary to call this deprecated function")
-
-    def __repr__(self):
-        if self.f is None:
-            return "<Solution '%s' at 0x%x>" % (self.status, id(self))
-        return "<Solution %.2f at 0x%x>" % (self.f, id(self))
diff --git a/cobra/core/__init__.py b/cobra/core/__init__.py
index 24e7859..f23d109 100644
--- a/cobra/core/__init__.py
+++ b/cobra/core/__init__.py
@@ -1,21 +1,12 @@
-from .DictList import DictList
-from .Object import Object
-from .Gene import Gene
-from .Metabolite import Metabolite
-from .Reaction import Reaction
-from .Solution import Solution
-from .Model import Model
-from .Species import Species
+# -*- coding: utf-8 -*-
 
-try:
-    import scipy
-except:
-    scipy = None
+from __future__ import absolute_import
 
-if scipy:
-    from .ArrayBasedModel import ArrayBasedModel
-else:
-    from warnings import warn
-    warn("ArrayBasedModel requires scipy")
-    del warn
-del scipy
+from cobra.core.dictlist import DictList
+from cobra.core.gene import Gene
+from cobra.core.metabolite import Metabolite
+from cobra.core.model import Model
+from cobra.core.object import Object
+from cobra.core.reaction import Reaction
+from cobra.core.solution import Solution, LegacySolution, get_solution
+from cobra.core.species import Species
diff --git a/cobra/core/ArrayBasedModel.py b/cobra/core/arraybasedmodel.py
similarity index 96%
rename from cobra/core/ArrayBasedModel.py
rename to cobra/core/arraybasedmodel.py
index fdb722b..663ff08 100644
--- a/cobra/core/ArrayBasedModel.py
+++ b/cobra/core/arraybasedmodel.py
@@ -1,11 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 from sys import maxsize
 from warnings import warn
-from six import iteritems
 
 from numpy import array, ndarray
-from scipy.sparse import lil_matrix, dok_matrix
+from scipy.sparse import dok_matrix, lil_matrix
+from six import iteritems
 
-from .Model import Model
+from cobra.core.model import Model
 
 
 class ArrayBasedModel(Model):
@@ -27,6 +31,10 @@ class ArrayBasedModel(Model):
             Specifies which type of backend matrix to use for S.
 
         """
+        warn("ArrayBasedModel is deprecated, use "
+             "`cobra.util.array.create_stoichiometric_matrix` instead",
+             DeprecationWarning)
+
         if deepcopy_model and isinstance(description, Model):
             description = description.copy()
         Model.__init__(self, description)
@@ -360,7 +368,8 @@ class SMatrix_dok(dok_matrix):
             else:  # setting 0 means metabolites should be removed
                 metabolite = self._model.metabolites[index[0]]
                 if metabolite in reaction._metabolites:
-                    reaction.pop(metabolite)
+                    reaction.subtract_metabolites(
+                        {metabolite: reaction.get_coefficient(metabolite)})
 
     def tolil(self):
         new = SMatrix_lil(dok_matrix.tolil(self), model=self._model)
@@ -392,7 +401,8 @@ class SMatrix_lil(lil_matrix):
             for reaction in reactions:
                 to_remove = met_set.intersection(reaction._metabolites)
                 for i in to_remove:
-                    reaction.pop(i)
+                    reaction.subtract_metabolites(
+                        {i: reaction.get_coefficient(i)})
         else:  # add metabolites
             met_dict = {met: value for met in metabolites}
             for reaction in reactions:
diff --git a/cobra/core/DictList.py b/cobra/core/dictlist.py
similarity index 72%
rename from cobra/core/DictList.py
rename to cobra/core/dictlist.py
index 6d587df..364a6a6 100644
--- a/cobra/core/DictList.py
+++ b/cobra/core/dictlist.py
@@ -1,11 +1,12 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 import re
 from itertools import islice
-from six import string_types, iteritems, PY3
 
-try:
-    from numpy import bool_
-except:
-    bool_ = bool
+from numpy import bool_
+from six import PY3, iteritems, string_types
 
 
 class DictList(list):
@@ -14,12 +15,16 @@ class DictList(list):
     This object behaves like a list, but has the O(1) speed
     benefits of a dict when looking up elements by their id.
 
+    Parameters
+    ----------
+    *args : iterable
+        iterable as single argument to create new DictList from
     """
 
     def __init__(self, *args):
         if len(args) > 2:
             raise TypeError("takes at most 1 argument (%d given)" % len(args))
-        list.__init__(self)
+        super(DictList, self).__init__(self)
         self._dict = {}
         if len(args) == 1:
             other = args[0]
@@ -49,44 +54,95 @@ class DictList(list):
         return list.__getitem__(self, self._dict[id])
 
     def list_attr(self, attribute):
-        """return a list of the given attribute for every object
-
-        """
+        """return a list of the given attribute for every object"""
         return [getattr(i, attribute) for i in self]
 
-    def query(self, search_function, attribute="id"):
-        """query the list
+    def get_by_any(self, iterable):
+        """
+        Get a list of members using several different ways of indexing
+
+        Parameters
+        ----------
+        iterable : list (if not, turned into single element list)
+            list where each element is either int (referring to an index
+            in this DictList), string (an id of a member in this DictList) or
+            member of this DictList for pass-through
+
+        Returns
+        -------
+        list
+            a list of members
+        """
+        def get_item(item):
+            if isinstance(item, int):
+                return self[item]
+            elif isinstance(item, string_types):
+                return self.get_by_id(item)
+            elif item in self:
+                return item
+            else:
+                raise TypeError("item in iterable cannot be '%s'" % type(item))
+
+        if not isinstance(iterable, list):
+            iterable = [iterable]
+        return [get_item(item) for item in iterable]
+
+    def query(self, search_function, attribute=None):
+        """Query the list
+
+        Parameters
+        ----------
+        search_function : a string, regular expression or function
+            used to find the matching elements in the list.
+
+            - a regular expression (possibly compiled), in which case the
+            given attribute of the object should match the regular expression.
+
+            - a function which takes one argument and returns True for
+            desired values
+        attribute : string or None
+            the name of the attribute of the object to be passed as argument to the
+            `search_function`. If this is None, the object itself is used.
+
+        Returns
+        -------
+        DictList
+            a new list of objects which match the query
+
+        Examples
+        --------
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model('textbook')
+        >>> model.reactions.query(lambda x: x.boundary)
+        >>> import re
+        >>> regex = re.compile('^g', flags=re.IGNORECASE)
+        >>> model.metabolites.query(regex, attribute='name')
+        """
+        def select_attribute(x):
+            if attribute is None:
+                return x
+            else:
+                return getattr(x, attribute)
 
-        search_function: used to select which objects to return
-            * a string, in which case any object.attribute containing
-              the string will be returned
+        try:
+            # if the search_function is a regular expression
+            regex_searcher = re.compile(search_function)
 
-            * a compiled regular expression
+            if attribute is not None:
+                matches = (
+                    i for i in self if
+                    regex_searcher.findall(select_attribute(i)) != [])
 
-            * a function which takes one argument and returns True
-              for desired values
+            else:
+                # Don't regex on objects
+                matches = (
+                    i for i in self if
+                    regex_searcher.findall(getattr(i, 'id')) != [])
 
-        attribute: the attribute to be searched for (default is 'id').
-                   If this is None, the object itself is used.
+        except TypeError:
+            matches = (
+                i for i in self if search_function(select_attribute(i)))
 
-        returns: a list of objects which match the query
-        """
-        if attribute is None:
-            def select_attribute(x):
-                return x
-        else:
-            def select_attribute(x):
-                return getattr(x, attribute)
-
-        # if the search_function is a regular expression
-        if isinstance(search_function, string_types):
-            search_function = re.compile(search_function)
-        if hasattr(search_function, "findall"):
-            matches = (i for i in self
-                       if search_function.findall(select_attribute(i)) != [])
-        else:
-            matches = (i for i in self
-                       if search_function(select_attribute(i)))
         results = self.__class__()
         results._extend_nocheck(matches)
         return results
@@ -143,7 +199,7 @@ class DictList(list):
         """extends without checking for uniqueness
 
         This function should only be used internally by DictList when it
-        can guarentee elements are already unique (as in when coming from
+        can guarantee elements are already unique (as in when coming from
         self or other DictList). It will be faster because it skips these
         checks.
 
@@ -158,10 +214,39 @@ class DictList(list):
                                 current_length):
             _dict[obj.id] = i
 
+    def __sub__(self, other):
+        """x.__sub__(y) <==> x - y
+
+        Parameters
+        ----------
+        other : iterable
+            other must contain only unique id's present in the list
+        """
+        total = DictList()
+        total.extend(self)
+        for item in other:
+            total.remove(item)
+        return total
+
+    def __isub__(self, other):
+        """x.__sub__(y) <==> x -= y
+
+        Parameters
+        ----------
+        other : iterable
+            other must contain only unique id's present in the list
+        """
+
+        for item in other:
+            self.remove(item)
+        return self
+
     def __add__(self, other):
         """x.__add__(y) <==> x + y
 
-        other: iterable
+        Parameters
+        ----------
+        other : iterable
             other must contain only unique id's which do not intersect
             with self
 
@@ -174,7 +259,9 @@ class DictList(list):
     def __iadd__(self, other):
         """x.__iadd__(y) <==> x += y
 
-        other: iterable
+        Parameters
+        ----------
+        other : iterable
             other must contain only unique id's whcih do not intersect
             with self
 
@@ -188,7 +275,7 @@ class DictList(list):
     def __getstate__(self):
         """gets internal state
 
-        This is only provided for backwards compatibilty so older
+        This is only provided for backwards compatibility so older
         versions of cobrapy can load pickles generated with cobrapy. In
         reality, the "_dict" state is ignored when loading a pickle"""
         return {"_dict": self._dict}
@@ -266,6 +353,10 @@ class DictList(list):
                 _dict[i] = j - 1
         return value
 
+    def add(self, x):
+        """Opposite of `remove`. Mirrors set.add"""
+        self.extend([x])
+
     def remove(self, x):
         """.. warning :: Internal use only"""
         # Each item is unique in the list which allows this
@@ -357,7 +448,7 @@ class DictList(list):
             return DictList.get_by_id(self, attr)
         except KeyError:
             raise AttributeError("DictList has no attribute or entry %s" %
-                                 (attr))
+                                 attr)
 
     def __dir__(self):
         # override this to allow tab complete of items by their id
diff --git a/cobra/core/Formula.py b/cobra/core/formula.py
similarity index 88%
rename from cobra/core/Formula.py
rename to cobra/core/formula.py
index 005bb49..9fcf603 100644
--- a/cobra/core/Formula.py
+++ b/cobra/core/formula.py
@@ -1,7 +1,11 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 import re
 from warnings import warn
 
-from .Object import Object
+from cobra.core.object import Object
 
 # Numbers are not required because of the |(?=[A-Z])? block. See the
 # discussion in https://github.com/opencobra/cobrapy/issues/128 for
@@ -12,8 +16,10 @@ element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
 class Formula(Object):
     """Describes a Chemical Formula
 
-    A legal formula string contains only letters and numbers.
-
+    Parameters
+    ----------
+    formula : string
+        A legal formula string contains only letters and numbers.
     """
     def __init__(self, formula=None):
         Object.__init__(self, formula)
@@ -25,8 +31,15 @@ class Formula(Object):
     def __add__(self, other_formula):
         """Combine two molecular formulas.
 
-        other_formula: cobra.Formula or str of a chemical Formula.
+        Parameters
+        ----------
+        other_formula : Formula, str
+            string for a chemical formula
 
+        Returns
+        -------
+        Formula
+           The combined formula
         """
         return Formula(self.formula + other_formula.formula)
 
@@ -67,12 +80,18 @@ class Formula(Object):
 
     @property
     def weight(self):
-        """Calculate the formula weight"""
+        """Calculate the mol mass of the compound
+
+        Returns
+        -------
+        float
+            the mol mass
+        """
         try:
             return sum([count * elements_and_molecular_weights[element]
                         for element, count in self.elements.items()])
         except KeyError as e:
-            warn("The element %s does not appear in the peridic table" % e)
+            warn("The element %s does not appear in the periodic table" % e)
 
 
 elements_and_molecular_weights = {
diff --git a/cobra/core/Gene.py b/cobra/core/gene.py
similarity index 63%
rename from cobra/core/Gene.py
rename to cobra/core/gene.py
index 797c24d..4ea1f1b 100644
--- a/cobra/core/Gene.py
+++ b/cobra/core/gene.py
@@ -1,11 +1,16 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 import re
-from warnings import warn
-from ast import parse as ast_parse, Name, And, Or, BitOr, BitAnd, \
-    BoolOp, Expression, NodeTransformer
+from ast import parse as ast_parse
+from ast import (
+    And, BitAnd, BitOr, BoolOp, Expression, Name, NodeTransformer, Or)
 from keyword import kwlist
+from warnings import warn
 
-from .Species import Species
-
+from cobra.core.species import Species
+from cobra.util import resettable
 
 keywords = list(kwlist)
 keywords.remove("and")
@@ -30,14 +35,22 @@ replacements = (
 def ast2str(expr, level=0, names=None):
     """convert compiled ast to gene_reaction_rule str
 
-    expr: str of a gene reaction rule
-
-    level: internal use only
-
-    names: optional dict of {Gene.id: Gene.name}
-        Use this to get a rule str which uses names instead. This
+    Parameters
+    ----------
+    expr : str
+        string for a gene reaction rule, e.g "a and b"
+    level : int
+        internal use only
+    names : dict
+        Dict where each element is a gene identifier and the value is the
+        gene name. Use this to get a rule str which uses names instead. This
         should be done for display purposes only. All gene_reaction_rule
         strings which are computed with should use the id.
+
+    Returns
+    -------
+    string
+        The gene reaction rule
     """
     if isinstance(expr, Expression):
         return ast2str(expr.body, 0, names) \
@@ -62,7 +75,21 @@ def ast2str(expr, level=0, names=None):
 
 
 def eval_gpr(expr, knockouts):
-    """evaluate compiled ast of gene_reaction_rule with knockouts"""
+    """evaluate compiled ast of gene_reaction_rule with knockouts
+
+    Parameters
+    ----------
+    expr : Expression
+        The ast of the gene reaction rule
+    knockouts : DictList, set
+        Set of genes that are knocked out
+
+    Returns
+    -------
+    bool
+        True if the gene reaction rule is true with the given knockouts
+        otherwise false
+    """
     if isinstance(expr, Expression):
         return eval_gpr(expr.body, knockouts)
     elif isinstance(expr, Name):
@@ -86,6 +113,7 @@ class GPRCleaner(NodeTransformer):
 
     Parts of the tree are rewritten to allow periods in gene ID's and
     bitwise boolean operations"""
+
     def __init__(self):
         NodeTransformer.__init__(self)
         self.gene_set = set()
@@ -113,7 +141,16 @@ class GPRCleaner(NodeTransformer):
 def parse_gpr(str_expr):
     """parse gpr into AST
 
-    returns: (ast_tree, {gene_ids})"""
+    Parameters
+    ----------
+    str_expr : string
+        string with the gene reaction rule to parse
+
+    Returns
+    -------
+    tuple
+        elements ast_tree and gene_ids as a set
+    """
     str_expr = str_expr.strip()
     if len(str_expr) == 0:
         return None, set()
@@ -130,28 +167,64 @@ def parse_gpr(str_expr):
 
 
 class Gene(Species):
+    """A Gene in a cobra model
+
+    Parameters
+    ----------
+    id : string
+        The identifier to associate the gene with
+    name: string
+        A longer human readable name for the gene
+    functional: bool
+        Indicates whether the gene is functional.  If it is not functional
+        then it cannot be used in an enzyme complex nor can its products be
+        used.
+    """
 
     def __init__(self, id=None, name="", functional=True):
+        Species.__init__(self, id=id, name=name)
+        self._functional = functional
+
+    @property
+    def functional(self):
+        """A flag indicating if the gene is functional.
+
+        Changing the flag is reverted upon exit if executed within the model
+        as context.
         """
-        id: A string.
+        return self._functional
 
-        name: String.  A human readable name.
+    @functional.setter
+    @resettable
+    def functional(self, value):
+        if not isinstance(value, bool):
+            raise ValueError('expected boolean')
+        self._functional = value
 
-        functional: Boolean.  Indicate whether the gene is functional.  If it
-        is not functional then it cannot be used in an enzyme complex nor
-        can its products be used.
+    def knock_out(self):
+        """Knockout gene by marking it as non-functional and setting all
+        associated reactions bounds to zero.
 
+        The change is reverted upon exit if executed within the model as
+        context.
         """
-        Species.__init__(self, id=id, name=name)
-        self.functional = functional
+        self.functional = False
+        for reaction in self.reactions:
+            if not reaction.functional:
+                reaction.bounds = (0, 0)
 
     def remove_from_model(self, model=None,
                           make_dependent_reactions_nonfunctional=True):
         """Removes the association
 
-        make_dependent_reactions_nonfunctional: Boolean.  If True then replace
-        the gene with 'False' in the gene association, else replace the gene
-        with 'True'
+        Parameters
+        ----------
+        model : cobra model
+           The model to remove the gene from
+        make_dependent_reactions_nonfunctional : bool
+           If True then replace the gene with 'False' in the gene
+           association, else replace the gene with 'True'
+
 
         .. deprecated :: 0.4
             Use cobra.manipulation.delete_model_genes to simulate knockouts
@@ -195,3 +268,25 @@ class Gene(Species):
                 the_reaction.lower_bound = 0
                 the_reaction.upper_bound = 0
         self._reaction.clear()
+
+    def _repr_html_(self):
+        return """
+        <table>
+            <tr>
+                <td><strong>Gene identifier</strong></td><td>{id}</td>
+            </tr><tr>
+                <td><strong>Name</strong></td><td>{name}</td>
+            </tr><tr>
+                <td><strong>Memory address</strong></td>
+                <td>{address}</td>
+            </tr><tr>
+                <td><strong>Functional</strong></td><td>{functional}</td>
+            </tr><tr>
+                <td><strong>In {n_reactions} reaction(s)</strong></td><td>
+                    {reactions}</td>
+            </tr>
+        </table>""".format(id=self.id, name=self.name,
+                           functional=self.functional,
+                           address='0x0%x' % id(self),
+                           n_reactions=len(self.reactions),
+                           reactions=', '.join(r.id for r in self.reactions))
diff --git a/cobra/core/metabolite.py b/cobra/core/metabolite.py
new file mode 100644
index 0000000..8f78e39
--- /dev/null
+++ b/cobra/core/metabolite.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import re
+from warnings import warn
+
+from six import iteritems
+from future.utils import raise_from, raise_with_traceback
+
+from cobra.exceptions import OptimizationError
+from cobra.core.formula import elements_and_molecular_weights
+from cobra.core.species import Species
+from cobra.util.solver import check_solver_status
+
+
+# Numbers are not required because of the |(?=[A-Z])? block. See the
+# discussion in https://github.com/opencobra/cobrapy/issues/128 for
+# more details.
+element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
+
+
+class Metabolite(Species):
+    """Metabolite is a class for holding information regarding
+    a metabolite in a cobra.Reaction object.
+
+    Parameters
+    ----------
+    id : str
+        the identifier to associate with the metabolite
+    formula : str
+        Chemical formula (e.g. H2O)
+    name : str
+        A human readable name.
+    charge : float
+       The charge number of the metabolite
+    compartment: str or None
+       Compartment of the metabolite.
+    """
+
+    def __init__(self, id=None, formula=None, name="",
+                 charge=None, compartment=None):
+        Species.__init__(self, id, name)
+        self.formula = formula
+        # because in a Model a metabolite may participate in multiple Reactions
+        self.compartment = compartment
+        self.charge = charge
+
+        self._constraint_sense = 'E'
+        self._bound = 0.
+
+    def _set_id_with_model(self, value):
+        if value in self.model.metabolites:
+            raise ValueError("The model already contains a metabolite with "
+                             "the id:", value)
+        self.model.constraints[self.id].name = value
+        self._id = value
+        self.model.metabolites._generate_index()
+
+    @property
+    def constraint(self):
+        """Get the constraints associated with this metabolite from the solve
+
+        Returns
+        -------
+        optlang.<interface>.Constraint
+            the optlang constraint for this metabolite
+        """
+        if self.model is not None:
+            return self.model.constraints[self.id]
+
+    @property
+    def elements(self):
+        """ Dictionary of elements as keys and their count in the metabolite
+        as integer. When set, the `formula` property is updated accordingly """
+        tmp_formula = self.formula
+        if tmp_formula is None:
+            return {}
+        # necessary for some old pickles which use the deprecated
+        # Formula class
+        tmp_formula = str(self.formula)
+        # commonly occurring characters in incorrectly constructed formulas
+        if "*" in tmp_formula:
+            warn("invalid character '*' found in formula '%s'" % self.formula)
+            tmp_formula = tmp_formula.replace("*", "")
+        if "(" in tmp_formula or ")" in tmp_formula:
+            warn("invalid formula (has parenthesis) in '%s'" % self.formula)
+            return None
+        composition = {}
+        parsed = element_re.findall(tmp_formula)
+        for (element, count) in parsed:
+            if count == '':
+                count = 1
+            else:
+                try:
+                    count = float(count)
+                    int_count = int(count)
+                    if count == int_count:
+                        count = int_count
+                    else:
+                        warn("%s is not an integer (in formula %s)" %
+                             (count, self.formula))
+                except ValueError:
+                    warn("failed to parse %s (in formula %s)" %
+                         (count, self.formula))
+                    return None
+            if element in composition:
+                composition[element] += count
+            else:
+                composition[element] = count
+        return composition
+
+    @elements.setter
+    def elements(self, elements_dict):
+        def stringify(element, number):
+            return element if number == 1 else element + str(number)
+
+        self.formula = ''.join(stringify(e, n) for e, n in
+                               sorted(iteritems(elements_dict)))
+
+    @property
+    def formula_weight(self):
+        """Calculate the formula weight"""
+        try:
+            return sum([count * elements_and_molecular_weights[element]
+                        for element, count in self.elements.items()])
+        except KeyError as e:
+            warn("The element %s does not appear in the peridic table" % e)
+
+    @property
+    def y(self):
+        """The shadow price for the metabolite in the most recent solution
+
+        Shadow prices are computed from the dual values of the bounds in
+        the solution.
+
+        """
+        warn("Please use metabolite.shadow_price instead.", DeprecationWarning)
+        return self.shadow_price
+
+    @property
+    def shadow_price(self):
+        """
+        The shadow price in the most recent solution.
+
+        Shadow price is the dual value of the corresponding constraint in the
+        model.
+
+        Warnings
+        --------
+        * Accessing shadow prices through a `Solution` object is the safer,
+          preferred, and only guaranteed to be correct way. You can see how to
+          do so easily in the examples.
+        * Shadow price is retrieved from the currently defined
+          `self._model.solver`. The solver status is checked but there are no
+          guarantees that the current solver state is the one you are looking
+          for.
+        * If you modify the underlying model after an optimization, you will
+          retrieve the old optimization values.
+
+        Raises
+        ------
+        RuntimeError
+            If the underlying model was never optimized beforehand or the
+            metabolite is not part of a model.
+        OptimizationError
+            If the solver status is anything other than 'optimal'.
+
+        Examples
+        --------
+        >>> import cobra
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model("textbook")
+        >>> solution = model.optimize()
+        >>> model.metabolites.glc__D_e.shadow_price
+        -0.09166474637510488
+        >>> solution.shadow_prices.glc__D_e
+        -0.091664746375104883
+        """
+        try:
+            check_solver_status(self._model.solver.status)
+            return self._model.constraints[self.id].dual
+        except AttributeError:
+            raise RuntimeError(
+                "metabolite '{}' is not part of a model".format(self.id))
+        # Due to below all-catch, which sucks, need to reraise these.
+        except (RuntimeError, OptimizationError) as err:
+            raise_with_traceback(err)
+        # Would love to catch CplexSolverError and GurobiError here.
+        except Exception as err:
+            raise_from(OptimizationError(
+                "Likely no solution exists. Original solver message: {}."
+                "".format(str(err))), err)
+
+    def remove_from_model(self, destructive=False):
+        """Removes the association from self.model
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        destructive : bool
+            If False then the metabolite is removed from all
+            associated reactions.  If True then all associated
+            reactions are removed from the Model.
+        """
+        self._model.remove_metabolites(self, destructive)
+
+    def summary(self, solution=None, threshold=0.01, fva=False,
+                floatfmt='.3g'):
+        """Print a summary of the reactions which produce and consume this
+        metabolite.
+
+        This method requires the model for which this metabolite is a part
+        to be solved.
+
+        Parameters
+        ----------
+        solution : cobra.core.Solution
+            A previously solved model solution to use for generating the
+            summary. If none provided (default), the summary method will
+            resolve the model. Note that the solution object must match the
+            model, i.e., changes to the model such as changed bounds,
+            added or removed reactions are not taken into account by this
+            method.
+
+        threshold : float
+            a value below which to ignore reaction fluxes
+
+        fva : float (0->1), or None
+            Whether or not to include flux variability analysis in the output.
+            If given, fva should be a float between 0 and 1, representing the
+            fraction of the optimum objective to be searched.
+
+        floatfmt : string
+            format method for floats, passed to tabulate. Default is '.3g'.
+        """
+        from cobra.flux_analysis.summary import metabolite_summary
+        return metabolite_summary(self, solution=solution, threshold=threshold,
+                                  fva=fva, floatfmt=floatfmt)
+
+    def _repr_html_(self):
+        return """
+        <table>
+            <tr>
+                <td><strong>Metabolite identifier</strong></td><td>{id}</td>
+            </tr><tr>
+                <td><strong>Name</strong></td><td>{name}</td>
+            </tr><tr>
+                <td><strong>Memory address</strong></td>
+                <td>{address}</td>
+            </tr><tr>
+                <td><strong>Formula</strong></td><td>{formula}</td>
+            </tr><tr>
+                <td><strong>Compartment</strong></td><td>{compartment}</td>
+            </tr><tr>
+                <td><strong>In {n_reactions} reaction(s)</strong></td><td>
+                    {reactions}</td>
+            </tr>
+        </table>""".format(id=self.id, name=self.name, formula=self.formula,
+                           address='0x0%x' % id(self),
+                           compartment=self.compartment,
+                           n_reactions=len(self.reactions),
+                           reactions=', '.join(r.id for r in self.reactions))
diff --git a/cobra/core/model.py b/cobra/core/model.py
new file mode 100644
index 0000000..fb0be9a
--- /dev/null
+++ b/cobra/core/model.py
@@ -0,0 +1,1070 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import types
+import logging
+from copy import copy, deepcopy
+from functools import partial
+from warnings import warn
+
+import optlang
+import six
+import sympy
+from six import iteritems, string_types
+from sympy import S
+
+from cobra.core.dictlist import DictList
+from cobra.core.object import Object
+from cobra.core.reaction import separate_forward_and_reverse_bounds, Reaction
+from cobra.core.solution import get_solution
+from cobra.solvers import optimize
+from cobra.util.context import HistoryManager, resettable, get_context
+from cobra.util.solver import (
+    SolverNotFound, get_solver_name, interface_to_str, set_objective, solvers,
+    add_cons_vars_to_problem, remove_cons_vars_from_problem, choose_solver,
+    check_solver_status, assert_optimal)
+from cobra.util.util import AutoVivification
+
+LOGGER = logging.getLogger(__name__)
+
+
+class Model(Object):
+    """Class representation for a cobra model
+
+    Parameters
+    ----------
+    id_or_model : Model, string
+        Either an existing Model object in which case a new model object is
+        instantiated with the same properties as the original model,
+        or the identifier to associate with the model as a string.
+    name : string
+        Human readable name for the model
+
+    Attributes
+    ----------
+    reactions : DictList
+        A DictList where the key is the reaction identifier and the value a
+        Reaction
+    metabolites : DictList
+        A DictList where the key is the metabolite identifier and the value a
+        Metabolite
+    genes : DictList
+        A DictList where the key is the gene identifier and the value a
+        Gene
+    solution : Solution
+        The last obtained solution from optimizing the model.
+    """
+
+    def __setstate__(self, state):
+        """Make sure all cobra.Objects in the model point to the model.
+        """
+        self.__dict__.update(state)
+        for y in ['reactions', 'genes', 'metabolites']:
+            for x in getattr(self, y):
+                x._model = self
+                if y == 'reactions':
+                    x._reset_var_cache()
+        if not hasattr(self, "name"):
+            self.name = None
+
+    def __getstate__(self):
+        """Get state for serialization.
+
+        Ensures that the context stack is cleared prior to serialization,
+        since partial functions cannot be pickled reliably.
+        """
+        odict = self.__dict__.copy()
+        odict['_contexts'] = []
+        return odict
+
+    def __init__(self, id_or_model=None, name=None):
+        # Two construction modes: copy-construct from an existing Model,
+        # or build an empty model with the given identifier.
+        if isinstance(id_or_model, Model):
+            Object.__init__(self, name=name)
+            # Share the source model's state (reactions, metabolites, ...)
+            # and its solver object.
+            self.__setstate__(id_or_model.__dict__)
+            if not hasattr(self, "name"):
+                self.name = None
+            self._solver = id_or_model.solver
+        else:
+            Object.__init__(self, id_or_model, name=name)
+            self._trimmed = False
+            self._trimmed_genes = []
+            self._trimmed_reactions = {}
+            self.genes = DictList()
+            self.reactions = DictList()  # A list of cobra.Reactions
+            self.metabolites = DictList()  # A list of cobra.Metabolites
+            # genes based on their ids {Gene.id: Gene}
+            self.compartments = dict()
+            self._contexts = []
+
+            # from cameo ...
+
+            # if not hasattr(self, '_solver'):  # backwards compatibility
+            # with older cobrapy pickles?
+            # Start from an empty optlang problem with a zero objective;
+            # _populate_solver mirrors the (currently empty) containers.
+            interface = solvers[get_solver_name()]
+            self._solver = interface.Model()
+            self._solver.objective = interface.Objective(S.Zero)
+            self._populate_solver(self.reactions, self.metabolites)
+
+    @property
+    def solver(self):
+        """Get or set the attached solver instance.
+
+        The associated solver object manages the interaction with the
+        underlying optimizer, e.g. glpk.
+
+        This property is useful for accessing the optimization problem
+        directly and to define additional non-metabolic constraints.
+
+        Returns
+        -------
+        optlang.interface.Model
+            The low-level optlang solver model.
+
+        Examples
+        --------
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model("textbook")
+        >>> new = model.problem.Constraint(model.objective.expression,
+        >>> lb=0.99)
+        >>> model.solver.add(new)
+        """
+        return self._solver
+
+    @solver.setter
+    @resettable
+    def solver(self, value):
+        not_valid_interface = SolverNotFound(
+            '%s is not a valid solver interface. Pick from %s, or specify an '
+            'optlang interface (e.g. optlang.glpk_interface).' % (
+                value, list(solvers.keys())))
+        if isinstance(value, six.string_types):
+            try:
+                interface = solvers[interface_to_str(value)]
+            except KeyError:
+                raise not_valid_interface
+        elif isinstance(value, types.ModuleType) and hasattr(value, 'Model'):
+            interface = value
+        elif isinstance(value, optlang.interface.Model):
+            interface = value.interface
+        else:
+            raise not_valid_interface
+
+        # Do nothing if the solver did not change
+        if self.problem == interface:
+            return
+
+        for reaction in self.reactions:
+            reaction._reset_var_cache()
+        self._solver = interface.Model.clone(self._solver)
+
+    @property
+    def description(self):
+        warn("description deprecated", DeprecationWarning)
+        return self.name if self.name is not None else ""
+
+    @description.setter
+    def description(self, value):
+        self.name = value
+        warn("description deprecated", DeprecationWarning)
+
+    def get_metabolite_compartments(self):
+        """Return all metabolites' compartments."""
+        return {met.compartment for met in self.metabolites
+                if met.compartment is not None}
+
+    @property
+    def medium(self):
+        """Return the growth medium as a ``{reaction_id: bound}`` dict.
+
+        For every active exchange reaction, the absolute value of the
+        bound in the direction of metabolite creation is reported
+        (lower_bound for ``met <--``, upper_bound for ``met -->``).
+        This docstring lives on the getter so that ``help(Model.medium)``
+        shows it.
+        """
+
+        def is_active(reaction):
+            """Determine if a boundary reaction permits flux towards creating
+            metabolites
+            """
+
+            return ((bool(reaction.products) and (reaction.upper_bound > 0)) or
+                    (bool(reaction.reactants) and (reaction.lower_bound < 0)))
+
+        def get_active_bound(reaction):
+            """For an active boundary reaction, return the relevant bound"""
+            if reaction.reactants:
+                return -reaction.lower_bound
+            elif reaction.products:
+                return reaction.upper_bound
+
+        return {rxn.id: get_active_bound(rxn) for rxn in self.exchanges
+                if is_active(rxn)}
+
+    @medium.setter
+    def medium(self, medium):
+        """Get or set the constraints on the model exchanges.
+
+        `model.medium` returns a dictionary of the bounds for each of the
+        boundary reactions, in the form of `{rxn_id: bound}`, where `bound`
+        specifies the absolute value of the bound in direction of metabolite
+        creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`)
+
+        Parameters
+        ----------
+        medium: dictionary-like
+            The medium to initialize. medium should be a dictionary defining
+            `{rxn_id: bound}` pairs.
+
+        """
+
+        def set_active_bound(reaction, bound):
+            if reaction.reactants:
+                reaction.lower_bound = -bound
+            elif reaction.products:
+                reaction.upper_bound = bound
+
+        # Set the given media bounds
+        media_rxns = list()
+        for rxn_id, bound in iteritems(medium):
+            rxn = self.reactions.get_by_id(rxn_id)
+            media_rxns.append(rxn)
+            set_active_bound(rxn, bound)
+
+        boundary_rxns = set(self.exchanges)
+        media_rxns = set(media_rxns)
+
+        # Turn off reactions not present in media
+        for rxn in (boundary_rxns - media_rxns):
+            set_active_bound(rxn, 0)
+
+    def __add__(self, other_model):
+        """Add the content of another model to this model (+).
+
+        The model is copied as a new object, with a new model identifier,
+        and copies of all the reactions in the other model are added to this
+        model. The objective is the sum of the objective expressions for the
+        two models.
+        """
+        warn('use model.merge instead', DeprecationWarning)
+        return self.merge(other_model, objective='sum', inplace=False)
+
+    def __iadd__(self, other_model):
+        """Incrementally add the content of another model to this model (+=).
+
+        Copies of all the reactions in the other model are added to this
+        model. The objective is the sum of the objective expressions for the
+        two models.
+        """
+        warn('use model.merge instead', DeprecationWarning)
+        return self.merge(other_model, objective='sum', inplace=True)
+
+    def copy(self):
+        """Provides a partial 'deepcopy' of the Model.  All of the Metabolite,
+        Gene, and Reaction objects are created anew but in a faster fashion
+        than deepcopy
+
+        Returns
+        -------
+        cobra.Model
+            An independent copy of this model.
+        """
+        new = self.__class__()
+        # Scalar/simple attributes are shared by reference; the containers
+        # and the notes/annotation dicts get real copies below.
+        do_not_copy_by_ref = {"metabolites", "reactions", "genes", "notes",
+                              "annotation"}
+        for attr in self.__dict__:
+            if attr not in do_not_copy_by_ref:
+                new.__dict__[attr] = self.__dict__[attr]
+        new.notes = deepcopy(self.notes)
+        new.annotation = deepcopy(self.annotation)
+
+        # Fresh metabolite objects; only "formula" is copied by value, the
+        # back-references (_reaction, _model) are rebuilt afterwards.
+        new.metabolites = DictList()
+        do_not_copy_by_ref = {"_reaction", "_model"}
+        for metabolite in self.metabolites:
+            new_met = metabolite.__class__()
+            for attr, value in iteritems(metabolite.__dict__):
+                if attr not in do_not_copy_by_ref:
+                    new_met.__dict__[attr] = copy(
+                        value) if attr == "formula" else value
+            new_met._model = new
+            new.metabolites.append(new_met)
+
+        new.genes = DictList()
+        for gene in self.genes:
+            new_gene = gene.__class__(None)
+            for attr, value in iteritems(gene.__dict__):
+                if attr not in do_not_copy_by_ref:
+                    new_gene.__dict__[attr] = copy(
+                        value) if attr == "formula" else value
+            new_gene._model = new
+            new.genes.append(new_gene)
+
+        new.reactions = DictList()
+        do_not_copy_by_ref = {"_model", "_metabolites", "_genes"}
+        for reaction in self.reactions:
+            new_reaction = reaction.__class__()
+            for attr, value in iteritems(reaction.__dict__):
+                if attr not in do_not_copy_by_ref:
+                    new_reaction.__dict__[attr] = copy(value)
+            new_reaction._model = new
+            new.reactions.append(new_reaction)
+            # update awareness: re-link the copied reaction to the copied
+            # metabolites and genes (not the originals).
+            for metabolite, stoic in iteritems(reaction._metabolites):
+                new_met = new.metabolites.get_by_id(metabolite.id)
+                new_reaction._metabolites[new_met] = stoic
+                new_met._reaction.add(new_reaction)
+            for gene in reaction._genes:
+                new_gene = new.genes.get_by_id(gene.id)
+                new_reaction._genes.add(new_gene)
+                new_gene._reaction.add(new_reaction)
+
+        for reaction in new.reactions:
+            reaction._reset_var_cache()
+        try:
+            new._solver = deepcopy(self.solver)
+            # Cplex has an issue with deep copies
+        except Exception:  # pragma: no cover
+            new._solver = copy(self.solver)  # pragma: no cover
+
+        # it doesn't make sense to retain the context of a copied model so
+        # assign a new empty context
+        new._contexts = list()
+
+        return new
+
+    def add_metabolites(self, metabolite_list):
+        """Will add a list of metabolites to the model object and add new
+        constraints accordingly.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        metabolite_list : A list of `cobra.core.Metabolite` objects
+
+        """
+        if not hasattr(metabolite_list, '__iter__'):
+            metabolite_list = [metabolite_list]
+        if len(metabolite_list) == 0:
+            return None
+
+        # First check whether the metabolites exist in the model
+        metabolite_list = [x for x in metabolite_list
+                           if x.id not in self.metabolites]
+
+        bad_ids = [m for m in metabolite_list
+                   if not isinstance(m.id, string_types) or len(m.id) < 1]
+        if len(bad_ids) != 0:
+            raise ValueError('invalid identifiers in {}'.format(repr(bad_ids)))
+
+        for x in metabolite_list:
+            x._model = self
+        self.metabolites += metabolite_list
+
+        # from cameo ...
+        to_add = []
+        for met in metabolite_list:
+            if met.id not in self.constraints:
+                constraint = self.problem.Constraint(
+                    S.Zero, name=met.id, lb=0, ub=0)
+                to_add += [constraint]
+
+        self.add_cons_vars(to_add)
+
+        context = get_context(self)
+        if context:
+            context(partial(self.metabolites.__isub__, metabolite_list))
+            for x in metabolite_list:
+                # Do we care?
+                context(partial(setattr, x, '_model', None))
+
+    def remove_metabolites(self, metabolite_list, destructive=False):
+        """Remove a list of metabolites from the the object.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        metabolite_list : list
+            A list with `cobra.Metabolite` objects as elements.
+
+        destructive : bool
+            If False then the metabolite is removed from all
+            associated reactions.  If True then all associated
+            reactions are removed from the Model.
+
+        """
+        if not hasattr(metabolite_list, '__iter__'):
+            metabolite_list = [metabolite_list]
+        # Make sure metabolites exist in model
+        metabolite_list = [x for x in metabolite_list
+                           if x.id in self.metabolites]
+        for x in metabolite_list:
+            x._model = None
+
+            if not destructive:
+                for the_reaction in list(x._reaction):
+                    the_coefficient = the_reaction._metabolites[x]
+                    the_reaction.subtract_metabolites({x: the_coefficient})
+
+            else:
+                for x in list(x._reaction):
+                    x.remove_from_model()
+
+        self.metabolites -= metabolite_list
+
+        to_remove = [self.solver.constraints[m.id] for m in metabolite_list]
+        self.remove_cons_vars(to_remove)
+
+        context = get_context(self)
+        if context:
+            context(partial(self.metabolites.__iadd__, metabolite_list))
+            for x in metabolite_list:
+                context(partial(setattr, x, '_model', self))
+
+    def add_reaction(self, reaction):
+        """Will add a cobra.Reaction object to the model, if
+        reaction.id is not in self.reactions.
+
+        Parameters
+        ----------
+        reaction : cobra.Reaction
+            The reaction to add
+
+        Deprecated (0.6). Use `~cobra.Model.add_reactions` instead
+        """
+        warn("add_reaction deprecated. Use add_reactions instead",
+             DeprecationWarning)
+
+        self.add_reactions([reaction])
+
+    def add_boundary(self, metabolite, type="exchange", reaction_id=None,
+                     lb=None, ub=1000.0):
+        """Add a boundary reaction for a given metabolite.
+
+        There are three different types of pre-defined boundary reactions:
+        exchange, demand, and sink reactions.
+        An exchange reaction is a reversible, imbalanced reaction that adds
+        to or removes an extracellular metabolite from the extracellular
+        compartment.
+        A demand reaction is an irreversible reaction that consumes an
+        intracellular metabolite.
+        A sink is similar to an exchange but specifically for intracellular
+        metabolites.
+
+        If you set the reaction `type` to something else, you must specify the
+        desired identifier of the created reaction along with its upper and
+         lower bound. The name will be given by the metabolite name and the
+         given `type`.
+
+        Parameters
+        ----------
+        metabolite : cobra.Metabolite
+            Any given metabolite. The compartment is not checked but you are
+            encouraged to stick to the definition of exchanges and sinks.
+        type : str, {"exchange", "demand", "sink"}
+            Using one of the pre-defined reaction types is easiest. If you
+            want to create your own kind of boundary reaction choose
+            any other string, e.g., 'my-boundary'.
+        reaction_id : str, optional
+            The ID of the resulting reaction. Only used for custom reactions.
+        lb : float, optional
+            The lower bound of the resulting reaction. Only used for custom
+            reactions.
+        ub : float, optional
+            The upper bound of the resulting reaction. For the pre-defined
+            reactions this default value determines all bounds.
+
+        Returns
+        -------
+        cobra.Reaction
+            The created boundary reaction.
+
+        Examples
+        --------
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model("textbook")
+        >>> demand = model.add_boundary(model.metabolites.atp_c, type="demand")
+        >>> demand.id
+        'DM_atp_c'
+        >>> demand.name
+        'ATP demand'
+        >>> demand.bounds
+        (0, 1000.0)
+        >>> demand.build_reaction_string()
+        'atp_c --> '
+        """
+        types = dict(exchange=("EX", -ub, ub), demand=("DM", 0, ub),
+                     sink=("SK", -ub, ub))
+        if type in types:
+            prefix, lb, ub = types[type]
+            reaction_id = "{}_{}".format(prefix, metabolite.id)
+        if reaction_id in self.reactions:
+            raise ValueError('boundary %s already exists' % reaction_id)
+        name = "{} {}".format(metabolite.name, type)
+        rxn = Reaction(id=reaction_id, name=name, lower_bound=lb,
+                       upper_bound=ub)
+        rxn.add_metabolites({metabolite: -1})
+        self.add_reactions([rxn])
+        return rxn
+
+    def add_reactions(self, reaction_list):
+        """Add reactions to the model.
+
+        Reactions with identifiers identical to a reaction already in the
+        model are ignored.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        reaction_list : list
+            A list of `cobra.Reaction` objects
+        """
+
+        try:
+            reaction_list = DictList(reaction_list)
+        except TypeError:
+            reaction_list = DictList([reaction_list])
+
+        # First check whether the metabolites exist in the model
+        existing = [rxn for rxn in reaction_list if rxn.id in self.reactions]
+        for rxn in existing:
+            LOGGER.info('skip adding reaction %s as already existing', rxn.id)
+        reaction_list = [rxn for rxn in reaction_list
+                         if rxn.id not in existing]
+
+        context = get_context(self)
+
+        # Add reactions. Also take care of genes and metabolites in the loop
+        for reaction in reaction_list:
+            reaction._reset_var_cache()
+            reaction._model = self  # the reaction now points to the model
+            # keys() is necessary because the dict will be modified during
+            # the loop
+            for metabolite in list(reaction._metabolites.keys()):
+                # if the metabolite is not in the model, add it
+                # should we be adding a copy instead.
+                if metabolite not in self.metabolites:
+                    self.add_metabolites(metabolite)
+                # A copy of the metabolite exists in the model, the reaction
+                # needs to point to the metabolite in the model.
+                else:
+                    stoichiometry = reaction._metabolites.pop(metabolite)
+                    model_metabolite = self.metabolites.get_by_id(
+                        metabolite.id)
+                    reaction._metabolites[model_metabolite] = stoichiometry
+                    model_metabolite._reaction.add(reaction)
+                    if context:
+                        context(partial(
+                            model_metabolite._reaction.remove, reaction))
+
+            for gene in list(reaction._genes):
+                # If the gene is not in the model, add it
+                if not self.genes.has_id(gene.id):
+                    self.genes += [gene]
+                    gene._model = self
+
+                    if context:
+                        # Remove the gene later
+                        context(partial(self.genes.__isub__, [gene]))
+                        context(partial(setattr, gene, '_model', None))
+
+                # Otherwise, make the gene point to the one in the model
+                else:
+                    model_gene = self.genes.get_by_id(gene.id)
+                    if model_gene is not gene:
+                        reaction._dissociate_gene(gene)
+                        reaction._associate_gene(model_gene)
+
+        self.reactions += reaction_list
+
+        if context:
+            context(partial(self.reactions.__isub__, reaction_list))
+
+        # from cameo ...
+        self._populate_solver(reaction_list)
+
+    def remove_reactions(self, reactions, remove_orphans=False):
+        """Remove reactions from the model.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        reactions : list
+            A list with reactions (`cobra.Reaction`), or their id's, to remove
+
+        remove_orphans : bool
+            Remove orphaned genes and metabolites from the model as well
+
+        """
+        if isinstance(reactions, string_types) or hasattr(reactions, "id"):
+            warn("need to pass in a list")
+            reactions = [reactions]
+
+        context = get_context(self)
+
+        for reaction in reactions:
+
+            # Make sure the reaction is in the model
+            try:
+                reaction = self.reactions[self.reactions.index(reaction)]
+            except ValueError:
+                warn('%s not in %s' % (reaction, self))
+
+            else:
+                # Drop the reaction's forward/reverse solver variables too.
+                forward = reaction.forward_variable
+                reverse = reaction.reverse_variable
+                self.remove_cons_vars([forward, reverse])
+                self.reactions.remove(reaction)
+                reaction._model = None
+
+                if context:
+                    context(reaction._reset_var_cache)
+                    context(partial(setattr, reaction, '_model', self))
+                    context(partial(self.reactions.add, reaction))
+
+                # Detach the reaction from its metabolites; optionally
+                # delete metabolites that are left with no reactions.
+                for met in reaction._metabolites:
+                    if reaction in met._reaction:
+                        met._reaction.remove(reaction)
+                        if context:
+                            context(partial(met._reaction.add, reaction))
+                        if remove_orphans and len(met._reaction) == 0:
+                            self.remove_metabolites(met)
+
+                # Same treatment for genes.
+                for gene in reaction._genes:
+                    if reaction in gene._reaction:
+                        gene._reaction.remove(reaction)
+                        if context:
+                            context(partial(gene._reaction.add, reaction))
+
+                        if remove_orphans and len(gene._reaction) == 0:
+                            self.genes.remove(gene)
+                            if context:
+                                context(partial(self.genes.add, gene))
+
+    def add_cons_vars(self, what, **kwargs):
+        """Add constraints and variables to the model's mathematical problem.
+
+        Useful for variables and constraints that can not be expressed with
+        reactions and simple lower and upper bounds.
+
+        Additions are reversed upon exit if the model itself is used as
+        context.
+
+        Parameters
+        ----------
+        what : list or tuple of optlang variables or constraints.
+           The variables or constraints to add to the model. Must be of
+           class `optlang.interface.Variable` or
+           `optlang.interface.Constraint`.
+        **kwargs : keyword arguments
+           Passed to solver.add()
+        """
+        add_cons_vars_to_problem(self, what, **kwargs)
+
+    def remove_cons_vars(self, what):
+        """Remove variables and constraints from the model's mathematical
+        problem.
+
+        Remove variables and constraints that were added directly to the
+        model's underlying mathematical problem. Removals are reversed
+        upon exit if the model itself is used as context.
+
+        Parameters
+        ----------
+        what : list or tuple of optlang variables or constraints.
+           The variables or constraints to remove from the model. Must be of
+           class `optlang.interface.Variable` or
+           `optlang.interface.Constraint`.
+        """
+        remove_cons_vars_from_problem(self, what)
+
+    @property
+    def problem(self):
+        """The interface to the model's underlying mathematical problem.
+
+        Solutions to cobra models are obtained by formulating a mathematical
+        problem and solving it. Cobrapy uses the optlang package to
+        accomplish that and with this property you can get access to the
+        problem interface directly.
+
+        Returns
+        -------
+        optlang.interface
+            The problem interface that defines methods for interacting with
+            the problem and associated solver directly.
+        """
+        return self.solver.interface
+
+    @property
+    def variables(self):
+        """The mathematical variables in the cobra model.
+
+        In a cobra model, most variables are reactions. However,
+        for specific use cases, it may also be useful to have other types of
+        variables. This property defines all variables currently associated
+        with the model's problem.
+
+        Returns
+        -------
+        optlang.container.Container
+            A container with all associated variables.
+        """
+        return self.solver.variables
+
+    @property
+    def constraints(self):
+        """The constraints in the cobra model.
+
+        In a cobra model, most constraints are metabolites and their
+        stoichiometries. However, for specific use cases, it may also be
+        useful to have other types of constraints. This property defines all
+        constraints currently associated with the model's problem.
+
+        Returns
+        -------
+        optlang.container.Container
+            A container with all associated constraints.
+        """
+        return self.solver.constraints
+
+    @property
+    def exchanges(self):
+        """Exchange reactions in model.
+
+        Reactions that either don't have products or substrates.
+        """
+        return [rxn for rxn in self.reactions if rxn.boundary]
+
+    def _populate_solver(self, reaction_list, metabolite_list=None):
+        """Populate attached solver with constraints and variables that
+        model the provided reactions.
+
+        Parameters
+        ----------
+        reaction_list : list
+            Reactions to mirror as forward/reverse solver variables.
+        metabolite_list : list, optional
+            Metabolites that need a (zero-valued) mass-balance constraint
+            created up front.
+        """
+        constraint_terms = AutoVivification()
+        to_add = []
+        if metabolite_list is not None:
+            for met in metabolite_list:
+                to_add += [self.problem.Constraint(
+                    S.Zero, name=met.id, lb=0, ub=0)]
+        self.add_cons_vars(to_add)
+
+        for reaction in reaction_list:
+
+            # Each reaction is split into two non-negative flux variables
+            # (forward and reverse).
+            reverse_lb, reverse_ub, forward_lb, forward_ub = \
+                separate_forward_and_reverse_bounds(*reaction.bounds)
+
+            forward_variable = self.problem.Variable(
+                reaction.id, lb=forward_lb, ub=forward_ub)
+            reverse_variable = self.problem.Variable(
+                reaction.reverse_id, lb=reverse_lb, ub=reverse_ub)
+
+            self.add_cons_vars([forward_variable, reverse_variable])
+            # Variables must be flushed to the solver before they can be
+            # referenced in constraint coefficients below.
+            self.solver.update()
+
+            for metabolite, coeff in six.iteritems(reaction.metabolites):
+                if metabolite.id in self.constraints:
+                    constraint = self.constraints[metabolite.id]
+                else:
+                    constraint = self.problem.Constraint(
+                        S.Zero,
+                        name=metabolite.id,
+                        lb=0, ub=0)
+                    self.add_cons_vars(constraint, sloppy=True)
+
+                # Coefficients are collected and set in one pass at the
+                # end, which is much faster for larger models.
+                constraint_terms[constraint][forward_variable] = coeff
+                constraint_terms[constraint][reverse_variable] = -coeff
+
+        self.solver.update()
+        for constraint, terms in six.iteritems(constraint_terms):
+            constraint.set_linear_coefficients(terms)
+
+    def to_array_based_model(self, deepcopy_model=False, **kwargs):
+        """Makes a `cobra.core.ArrayBasedModel` from a cobra.Model
+        which may be used to perform linear algebra operations with the
+        stoichiometric matrix.
+
+        Deprecated (0.6). Use `cobra.util.array.create_stoichiometric_matrix`
+        instead.
+
+        Parameters
+        ----------
+        deepcopy_model : bool
+            If False then the ArrayBasedModel points to the Model
+
+        """
+        warn("to_array_based_model is deprecated. "
+             "use cobra.util.array.create_stoichiometric_matrix instead",
+             DeprecationWarning)
+        from cobra.core.arraybasedmodel import ArrayBasedModel
+        return ArrayBasedModel(self, deepcopy_model=deepcopy_model, **kwargs)
+
+    def slim_optimize(self, error_value=float('nan'), message=None):
+        """Optimize model without creating a solution object.
+
+        Creating a full solution object implies fetching shadow prices and
+        flux values for all reactions and metabolites from the solver
+        object. This necessarily takes some time and in cases where only one
+        or two values are of interest, it is recommended to instead use this
+        function which does not create a solution object returning only the
+        value of the objective. Note however that the `optimize()` function
+        uses efficient means to fetch values so if you need fluxes/shadow
+        prices for more than say 4 reactions/metabolites, then the total
+        speed increase of `slim_optimize` versus `optimize` is  expected to
+        be small or even negative depending on how you fetch the values
+        after optimization.
+
+        Parameters
+        ----------
+        error_value : float, None
+           The value to return if optimization failed due to e.g.
+           infeasibility. If None, raise `OptimizationError` if the
+           optimization fails.
+        message : string
+           Error message to use if the model optimization did not succeed.
+
+        Returns
+        -------
+        float
+            The objective value.
+        """
+        self.solver.optimize()
+        if self.solver.status == optlang.interface.OPTIMAL:
+            return self.solver.objective.value
+        elif error_value is not None:
+            return error_value
+        else:
+            assert_optimal(self, message)
+
+    def optimize(self, objective_sense=None, raise_error=False, **kwargs):
+        """
+        Optimize the model using flux balance analysis.
+
+        Parameters
+        ----------
+        objective_sense : {None, 'maximize', 'minimize'}, optional
+            Whether fluxes should be maximized or minimized. In case of None,
+            the previous direction is used.
+        raise_error : bool
+            If true, raise an OptimizationError if solver status is not
+             optimal.
+        solver : {None, 'glpk', 'cglpk', 'gurobi', 'cplex'}, optional
+            If unspecified will use the currently defined `self.solver`
+            otherwise it will use the given solver and update the attribute.
+        quadratic_component : {None, scipy.sparse.dok_matrix}, optional
+            The dimensions should be (n, n) where n is the number of
+            reactions. This sets the quadratic component (Q) of the
+            objective coefficient, adding :math:`\\frac{1}{2} v^T \cdot Q
+            \cdot v` to the objective. Ignored for optlang based solvers.
+        tolerance_feasibility : float
+            Solver tolerance for feasibility. Ignored for optlang based
+            solvers
+        tolerance_markowitz : float
+            Solver threshold during pivot. Ignored for optlang based solvers
+        time_limit : float
+            Maximum solver time (in seconds). Ignored for optlang based solvers
+
+        Notes
+        -----
+        Only the most commonly used parameters are presented here.  Additional
+        parameters for cobra.solvers may be available and specified with the
+        appropriate keyword argument.
+
+        """
+        # Determine whether a legacy (cobra.solvers) or an optlang solver is
+        # requested; legacy solvers take a separate code path below.
+        legacy, solver = choose_solver(self, solver=kwargs.get("solver"))
+        original_direction = self.objective.direction
+
+        if legacy:
+            if objective_sense is None:
+                # Translate the optlang direction into the legacy keyword.
+                objective_sense = {
+                    "max": "maximize", "min": "minimize"}[original_direction]
+            solution = optimize(self, objective_sense=objective_sense,
+                                **kwargs)
+            check_solver_status(solution.status, raise_error=raise_error)
+            return solution
+
+        self.solver = solver
+        # Temporarily apply the requested direction (falling back to the
+        # current one), optimize, then restore the original direction.
+        self.objective.direction = \
+            {"maximize": "max", "minimize": "min"}.get(
+                objective_sense, original_direction)
+        self.slim_optimize()
+        solution = get_solution(self, raise_error=raise_error)
+        self.objective.direction = original_direction
+        return solution
+
+    def repair(self, rebuild_index=True, rebuild_relationships=True):
+        """Update all indexes and pointers in a model
+
+        Parameters
+        ----------
+        rebuild_index : bool
+            rebuild the indices kept in reactions, metabolites and genes
+        rebuild_relationships : bool
+             reset all associations between genes, metabolites, model and
+             then re-add them.
+        """
+        if rebuild_index:  # DictList indexes
+            self.reactions._generate_index()
+            self.metabolites._generate_index()
+            self.genes._generate_index()
+        if rebuild_relationships:
+            # Clear every metabolite/gene -> reaction back-reference, then
+            # rebuild them from each reaction's own membership sets, which
+            # are treated as the authoritative source.
+            for met in self.metabolites:
+                met._reaction.clear()
+            for gene in self.genes:
+                gene._reaction.clear()
+            for rxn in self.reactions:
+                for met in rxn._metabolites:
+                    met._reaction.add(rxn)
+                for gene in rxn._genes:
+                    gene._reaction.add(rxn)
+        # point _model to self
+        for l in (self.reactions, self.genes, self.metabolites):
+            for e in l:
+                e._model = self
+
+    @property
+    def objective(self):
+        """Get or set the solver objective
+
+        Before introduction of the optlang based problems,
+        this function returned the objective reactions as a list. With
+        optlang, the objective is not limited to a simple linear summation of
+        individual reaction fluxes, making that return value ambiguous.
+        Henceforth, use `cobra.util.solver.linear_reaction_coefficients` to
+        get a dictionary of reactions with their linear coefficients (empty
+        if there are none)
+
+        The set value can be dictionary (reactions as keys, linear
+        coefficients as values), string (reaction identifier), int (reaction
+        index), Reaction or problem.Objective or sympy expression
+        directly interpreted as objectives.
+
+        When using a `HistoryManager` context, this attribute can be set
+        temporarily, reversed when exiting the context.
+        """
+        return self.solver.objective
+
+    @objective.setter
+    def objective(self, value):
+        # A sympy expression is wrapped in a proper optlang Objective first.
+        if isinstance(value, sympy.Basic):
+            value = self.problem.Objective(value, sloppy=False)
+        if not isinstance(value, (dict, optlang.interface.Objective)):
+            # Anything else (id string, index, Reaction, or list thereof) is
+            # resolved to reactions, each given a linear coefficient of 1.
+            try:
+                reactions = self.reactions.get_by_any(value)
+            except KeyError:
+                raise ValueError('invalid objective')
+            value = {rxn: 1 for rxn in reactions}
+        # additive=False replaces the existing objective entirely.
+        set_objective(self, value, additive=False)
+
+    def summary(self, solution=None, threshold=1E-8, fva=None, floatfmt='.3g'):
+        """Print a summary of the input and output fluxes of the model. This
+        method requires the model to have been previously solved.
+
+        Parameters
+        ----------
+        solution: cobra.core.Solution
+            A previously solved model solution to use for generating the
+            summary. If none provided (default), the summary method will
+            resolve the model. Note that the solution object must match the
+            model, i.e., changes to the model such as changed bounds,
+            added or removed reactions are not taken into account by this
+            method.
+
+        threshold : float
+            tolerance for determining if a flux is zero (not printed)
+
+        fva : int or None
+            Whether or not to calculate and report flux variability in the
+            output summary
+
+        floatfmt : string
+            format method for floats, passed to tabulate. Default is '.3g'.
+
+        """
+        # Deferred import avoids a circular dependency between cobra.core
+        # and cobra.flux_analysis.
+        from cobra.flux_analysis.summary import model_summary
+        return model_summary(self, solution=solution, threshold=threshold,
+                             fva=fva, floatfmt=floatfmt)
+
+    def __enter__(self):
+        """Record all future changes to the model, undoing them when a call to
+        __exit__ is received"""
+
+        # Create a new context and add it to the stack
+        try:
+            self._contexts.append(HistoryManager())
+        except AttributeError:
+            # First use as a context manager: the stack does not exist yet.
+            self._contexts = [HistoryManager()]
+
+        return self
+
+    def __exit__(self, type, value, traceback):
+        """Pop the top context manager and trigger the undo functions"""
+        context = self._contexts.pop()
+        context.reset()
+
+    def merge(self, right, prefix_existing=None, inplace=True,
+              objective='left'):
+        """Merge two models to create a model with the reactions from both
+        models.
+
+        Custom constraints and variables from right models are also copied
+        to left model, however note that, constraints and variables are
+        assumed to be the same if they have the same name.
+
+        Parameters
+        ----------
+        right : cobra.Model
+            The model to add reactions from
+        prefix_existing : string
+            Prefix the reaction identifier in the right that already exist
+            in the left model with this string.
+        inplace : bool
+            Add reactions from right directly to left model object.
+            Otherwise, create a new model leaving the left model untouched.
+            When done within the model as context, changes to the models are
+            reverted upon exit.
+        objective : string
+            One of 'left', 'right' or 'sum' for setting the objective of the
+            resulting model to that of the corresponding model or the sum of
+            both.
+
+        Returns
+        -------
+        cobra.Model
+            The merged model (`self` when ``inplace`` is True).
+        """
+        if inplace:
+            new_model = self
+        else:
+            new_model = self.copy()
+            new_model.id = '{}_{}'.format(self.id, right.id)
+        # Deep-copy so the right model's reactions are never mutated.
+        new_reactions = deepcopy(right.reactions)
+        if prefix_existing is not None:
+            existing = new_reactions.query(
+                lambda rxn: rxn.id in self.reactions)
+            for reaction in existing:
+                reaction.id = '{}{}'.format(prefix_existing, reaction.id)
+        new_model.add_reactions(new_reactions)
+        interface = new_model.problem
+        # Copy only the variables/constraints whose names are not already
+        # present; same-named entities are assumed to be identical.
+        new_vars = [interface.Variable.clone(v) for v in right.variables if
+                    v.name not in new_model.variables]
+        new_model.add_cons_vars(new_vars)
+        new_cons = [interface.Constraint.clone(c, model=new_model.solver)
+                    for c in right.constraints if
+                    c.name not in new_model.constraints]
+        new_model.add_cons_vars(new_cons, sloppy=True)
+        new_model.objective = dict(
+            left=self.objective,
+            right=right.objective,
+            sum=self.objective.expression + right.objective.expression
+        )[objective]
+        return new_model
+
+    def _repr_html_(self):
+        """Return an HTML table summarizing the model.
+
+        This hook is picked up by notebook front-ends (e.g. Jupyter) that
+        support rich HTML display of objects.
+        """
+        return """
+        <table>
+            <tr>
+                <td><strong>Name</strong></td>
+                <td>{name}</td>
+            </tr><tr>
+                <td><strong>Memory address</strong></td>
+                <td>{address}</td>
+            </tr><tr>
+                <td><strong>Number of metabolites</strong></td>
+                <td>{num_metabolites}</td>
+            </tr><tr>
+                <td><strong>Number of reactions</strong></td>
+                <td>{num_reactions}</td>
+            </tr><tr>
+                <td><strong>Objective expression</strong></td>
+                <td>{objective}</td>
+            </tr><tr>
+                <td><strong>Compartments</strong></td>
+                <td>{compartments}</td>
+            </tr>
+          </table>""".format(
+            name=self.id,
+            address='0x0%x' % id(self),
+            num_metabolites=len(self.metabolites),
+            num_reactions=len(self.reactions),
+            objective=str(self.objective.expression),
+            # Show the compartment name when available, else its key.
+            compartments=", ".join(
+                v if v else k for k, v in iteritems(self.compartments)
+            ))
diff --git a/cobra/core/object.py b/cobra/core/object.py
new file mode 100644
index 0000000..d92317e
--- /dev/null
+++ b/cobra/core/object.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from six import string_types
+
+
+class Object(object):
+    """Defines common behavior of object in cobra.core"""
+
+    def __init__(self, id=None, name=""):
+        """A simple object with an identifier
+
+        Parameters
+        ----------
+        id: None or a string
+            the identifier to associate with the object
+        name: string
+            a human readable name for the object
+        """
+        self._id = id
+        self.name = name
+
+        # Free-form containers for user notes and annotations.
+        self.notes = {}
+        self.annotation = {}
+
+    @property
+    def id(self):
+        # getattr with a default guards against old pickles missing `_id`.
+        return getattr(self, "_id", None)
+
+    @id.setter
+    def id(self, value):
+        if value == self.id:
+            pass
+        elif not isinstance(value, string_types):
+            raise TypeError("ID must be a string")
+        elif getattr(self, "_model", None) is not None:
+            # Let the subclass keep the containing model's indexes in sync.
+            self._set_id_with_model(value)
+        else:
+            self._id = value
+
+    def _set_id_with_model(self, value):
+        # Overridden by subclasses that must update model-level indexes.
+        self._id = value
+
+    def __getstate__(self):
+        """To prevent excessive replication during deepcopy."""
+        state = self.__dict__.copy()
+        if '_model' in state:
+            # The model reference is restored by the model itself.
+            state['_model'] = None
+        return state
+
+    def __repr__(self):
+        return "<%s %s at 0x%x>" % (self.__class__.__name__, self.id, id(self))
+
+    def __str__(self):
+        return str(self.id)
diff --git a/cobra/core/reaction.py b/cobra/core/reaction.py
new file mode 100644
index 0000000..e2d424f
--- /dev/null
+++ b/cobra/core/reaction.py
@@ -0,0 +1,1133 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, print_function
+
+import hashlib
+import re
+from collections import defaultdict
+from copy import copy, deepcopy
+from functools import partial
+from operator import attrgetter
+from warnings import warn
+
+from six import iteritems, iterkeys, string_types
+from future.utils import raise_from, raise_with_traceback
+
+from cobra.exceptions import OptimizationError
+from cobra.core.gene import Gene, ast2str, parse_gpr, eval_gpr
+from cobra.core.metabolite import Metabolite
+from cobra.core.object import Object
+from cobra.util.context import resettable, get_context
+from cobra.util.solver import (
+    linear_reaction_coefficients, set_objective, check_solver_status)
+
+# precompiled regular expressions
+# Matches and/or in a gene reaction rule
+and_or_search = re.compile(r'\(| and| or|\+|\)', re.IGNORECASE)
+# Uppercase AND/OR are tolerated but rewritten with a warning.
+uppercase_AND = re.compile(r'\bAND\b')
+uppercase_OR = re.compile(r'\bOR\b')
+# Collapses runs of two or more spaces left over after rule cleanup.
+gpr_clean = re.compile(' {2,}')
+# This regular expression finds any single letter compartment enclosed in
+# square brackets at the beginning of the string. For example [c] : foo --> bar
+compartment_finder = re.compile("^\s*(\[[A-Za-z]\])\s*:*")
+# Regular expressions to match the arrows
+_reversible_arrow_finder = re.compile("<(-+|=+)>")
+_forward_arrow_finder = re.compile("(-+|=+)>")
+_reverse_arrow_finder = re.compile("<(-+|=+)")
+
+
+class Reaction(Object):
+    """Reaction is a class for holding information regarding
+    a biochemical reaction in a cobra.Model object.
+
+    Parameters
+    ----------
+    id : string
+        The identifier to associate with this reaction
+    name : string
+        A human readable name for the reaction
+    subsystem : string
+        Subsystem where the reaction is meant to occur
+    lower_bound : float
+        The lower flux bound
+    upper_bound : float
+        The upper flux bound
+    """
+
+    def __init__(self, id=None, name='', subsystem='', lower_bound=0.,
+                 upper_bound=1000., objective_coefficient=0.):
+        Object.__init__(self, id, name)
+        self._gene_reaction_rule = ''
+        self.subsystem = subsystem
+
+        # The cobra.Genes that are used to catalyze the reaction
+        self._genes = set()
+
+        # A dictionary of metabolites and their stoichiometric coefficients in
+        # this reaction.
+        self._metabolites = dict()
+
+        # The set of compartments that partaking metabolites are in.
+        self._compartments = None
+
+        # self.model is None or refers to the cobra.Model that
+        # contains self
+        self._model = None
+
+        # The keyword is kept only to give a helpful error to callers of the
+        # pre-optlang API; a non-zero value is no longer accepted here.
+        if objective_coefficient != 0:
+            raise NotImplementedError('setting objective coefficient when '
+                                      'creating reaction is no longer '
+                                      'supported. Use the model.objective '
+                                      'setter')
+
+        # Used during optimization.  Indicates whether the
+        # variable is modeled as continuous, integer, binary, semicontinous, or
+        # semiinteger.
+        self.variable_kind = 'continuous'
+
+        # from cameo ...
+        self._lower_bound = lower_bound
+        self._upper_bound = upper_bound
+
+        # Lazily-populated caches for the optlang forward/reverse variables;
+        # see forward_variable/reverse_variable properties.
+        self._reverse_variable = None
+        self._forward_variable = None
+
+    def _set_id_with_model(self, value):
+        # Renaming a reaction attached to a model must also rename the
+        # underlying optlang variables and refresh the reaction index.
+        if value in self.model.reactions:
+            raise ValueError("The model already contains a reaction with"
+                             " the id:", value)
+        forward_variable = self.forward_variable
+        reverse_variable = self.reverse_variable
+        self._id = value
+        self.model.reactions._generate_index()
+        forward_variable.name = self.id
+        reverse_variable.name = self.reverse_id
+
+    @property
+    def reverse_id(self):
+        """Generate the id of reverse_variable from the reaction's id."""
+        # The short md5 suffix keeps the name unique and avoids clashes
+        # with user-defined reaction ids.
+        return '_'.join((self.id, 'reverse',
+                         hashlib.md5(
+                             self.id.encode('utf-8')).hexdigest()[0:5]))
+
+    @property
+    def flux_expression(self):
+        """Forward flux expression
+
+        Returns
+        -------
+        sympy expression
+            The expression representing the forward flux (if associated
+            with model), otherwise None. Representing the net flux if
+            model.reversible_encoding == 'unsplit' or None if reaction is
+            not associated with a model """
+        if self.model is not None:
+            # Net flux = forward variable minus reverse variable.
+            return 1. * self.forward_variable - 1. * self.reverse_variable
+        else:
+            return None
+
+    @property
+    def forward_variable(self):
+        """An optlang variable representing the forward flux
+
+        Returns
+        -------
+        optlang.interface.Variable
+            An optlang variable for the forward flux or None if reaction is
+            not associated with a model.
+        """
+        if self.model is not None:
+            if self._forward_variable is None:
+                # Lazily fetch and cache the variable from the solver.
+                self._forward_variable = self.model.variables[
+                    self.id]
+            # The cached variable must belong to the current solver instance.
+            assert self._forward_variable.problem is self.model.solver
+            return self._forward_variable
+        else:
+            return None
+
+    @property
+    def reverse_variable(self):
+        """An optlang variable representing the reverse flux
+
+        Returns
+        -------
+        optlang.interface.Variable
+            An optlang variable for the reverse flux or None if reaction is
+            not associated with a model.
+        """
+        model = self.model
+        if model is not None:
+            if self._reverse_variable is None:
+                # Lazily fetch and cache the variable from the solver.
+                self._reverse_variable = model.variables[
+                    self.reverse_id]
+            # The cached variable must belong to the current solver instance.
+            assert self._reverse_variable.problem is self.model.solver
+            return self._reverse_variable
+        else:
+            return None
+
+    def _reset_var_cache(self):
+        # Drop the cached optlang variables; they will be re-fetched from
+        # the model's solver on next access.
+        self._forward_variable = None
+        self._reverse_variable = None
+
+    @property
+    def objective_coefficient(self):
+        """ Get the coefficient for this reaction in a linear
+        objective (float)
+
+        Assuming that the objective of the associated model is summation of
+        fluxes from a set of reactions, the coefficient for each reaction
+        can be obtained individually using this property. A more general way
+        is to use the `model.objective` property directly.
+        """
+        return linear_reaction_coefficients(self.model, [self]).get(self, 0)
+
+    @objective_coefficient.setter
+    def objective_coefficient(self, value):
+        if self.model is None:
+            raise AttributeError('cannot assign objective to a missing model')
+        if self.flux_expression is not None:
+            # additive=True updates only this reaction's coefficient.
+            set_objective(self.model, {self: value}, additive=True)
+
+    def __copy__(self):
+        cop = copy(super(Reaction, self))
+        # A copy must not share cached solver variables with the original.
+        cop._reset_var_cache()
+        return cop
+
+    def __deepcopy__(self, memo):
+        cop = deepcopy(super(Reaction, self), memo)
+        # A copy must not share cached solver variables with the original.
+        cop._reset_var_cache()
+        return cop
+
+    @property
+    def lower_bound(self):
+        """Get or set the lower bound
+
+        Setting the lower bound (float) will also adjust the associated optlang
+        variables associated with the reaction. Infeasible combinations,
+        such as a lower bound higher than the current upper bound will
+        update the other bound.
+
+        When using a `HistoryManager` context, this attribute can be set
+        temporarily, reversed when exiting the context.
+        """
+        return self._lower_bound
+
+    @lower_bound.setter
+    @resettable
+    def lower_bound(self, value):
+        # Keep bounds consistent: push the upper bound up if needed.
+        if self.upper_bound < value:
+            self.upper_bound = value
+
+        self._lower_bound = value
+        update_forward_and_reverse_bounds(self, 'lower')
+
+    @property
+    def upper_bound(self):
+        """Get or set the upper bound
+
+        Setting the upper bound (float) will also adjust the associated optlang
+        variables associated with the reaction. Infeasible combinations,
+        such as a upper bound lower than the current lower bound will
+        update the other bound.
+
+        When using a `HistoryManager` context, this attribute can be set
+        temporarily, reversed when exiting the context.
+        """
+        return self._upper_bound
+
+    @upper_bound.setter
+    @resettable
+    def upper_bound(self, value):
+        # Keep bounds consistent: push the lower bound down if needed.
+        if self.lower_bound > value:
+            self.lower_bound = value
+
+        self._upper_bound = value
+        update_forward_and_reverse_bounds(self, 'upper')
+
+    @property
+    def bounds(self):
+        """ Get or set the bounds directly from a tuple
+
+        Convenience method for setting upper and lower bounds in one line
+        using a tuple of lower and upper bound. Invalid bounds will raise an
+        AssertionError.
+
+        When using a `HistoryManager` context, this attribute can be set
+        temporarily, reversed when exiting the context.
+        """
+        return self.lower_bound, self.upper_bound
+
+    @bounds.setter
+    @resettable
+    def bounds(self, value):
+        # NOTE(review): `assert` is stripped under `python -O`, so this
+        # validation is skipped in optimized mode.
+        assert value[0] <= value[1], "Invalid bounds: {}".format(value)
+        self._lower_bound, self._upper_bound = value
+        update_forward_and_reverse_bounds(self)
+
+    @property
+    def flux(self):
+        """
+        The flux value in the most recent solution.
+
+        Flux is the primal value of the corresponding variable in the model.
+
+        Warnings
+        --------
+        * Accessing reaction fluxes through a `Solution` object is the safer,
+          preferred, and only guaranteed to be correct way. You can see how to
+          do so easily in the examples.
+        * Reaction flux is retrieved from the currently defined
+          `self._model.solver`. The solver status is checked but there are no
+          guarantees that the current solver state is the one you are looking
+          for.
+        * If you modify the underlying model after an optimization, you will
+          retrieve the old optimization values.
+
+        Raises
+        ------
+        RuntimeError
+            If the underlying model was never optimized beforehand or the
+            reaction is not part of a model.
+        OptimizationError
+            If the solver status is anything other than 'optimal'.
+        AssertionError
+            If the flux value is not within the bounds.
+
+        Examples
+        --------
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model("textbook")
+        >>> solution = model.optimize()
+        >>> model.reactions.PFK.flux
+        7.477381962160283
+        >>> solution.fluxes.PFK
+        7.4773819621602833
+        """
+        try:
+            check_solver_status(self._model.solver.status)
+            return self.forward_variable.primal - self.reverse_variable.primal
+        except AttributeError:
+            # self._model is None, so `.solver` does not exist.
+            raise RuntimeError(
+                "reaction '{}' is not part of a model".format(self.id))
+        # Due to below all-catch, which sucks, need to reraise these.
+        except (RuntimeError, OptimizationError) as err:
+            raise_with_traceback(err)
+        # Would love to catch CplexSolverError and GurobiError here.
+        except Exception as err:
+            raise_from(OptimizationError(
+                "Likely no solution exists. Original solver message: {}."
+                "".format(str(err))), err)
+
+    @property
+    def reduced_cost(self):
+        """
+        The reduced cost in the most recent solution.
+
+        Reduced cost is the dual value of the corresponding variable in the
+        model.
+
+        Warnings
+        --------
+        * Accessing reduced costs through a `Solution` object is the safer,
+          preferred, and only guaranteed to be correct way. You can see how to
+          do so easily in the examples.
+        * Reduced cost is retrieved from the currently defined
+          `self._model.solver`. The solver status is checked but there are no
+          guarantees that the current solver state is the one you are looking
+          for.
+        * If you modify the underlying model after an optimization, you will
+          retrieve the old optimization values.
+
+        Raises
+        ------
+        RuntimeError
+            If the underlying model was never optimized beforehand or the
+            reaction is not part of a model.
+        OptimizationError
+            If the solver status is anything other than 'optimal'.
+
+        Examples
+        --------
+        >>> import cobra.test
+        >>> model = cobra.test.create_test_model("textbook")
+        >>> solution = model.optimize()
+        >>> model.reactions.PFK.reduced_cost
+        -8.673617379884035e-18
+        >>> solution.reduced_costs.PFK
+        -8.6736173798840355e-18
+        """
+        try:
+            check_solver_status(self._model.solver.status)
+            return self.forward_variable.dual - self.reverse_variable.dual
+        except AttributeError:
+            # self._model is None, so `.solver` does not exist.
+            raise RuntimeError(
+                "reaction '{}' is not part of a model".format(self.id))
+        # Due to below all-catch, which sucks, need to reraise these.
+        except (RuntimeError, OptimizationError) as err:
+            raise_with_traceback(err)
+        # Would love to catch CplexSolverError and GurobiError here.
+        except Exception as err:
+            raise_from(OptimizationError(
+                "Likely no solution exists. Original solver message: {}."
+                "".format(str(err))), err)
+
+    # read-only
+    @property
+    def metabolites(self):
+        """A copy of the metabolite-to-coefficient dictionary (read-only)."""
+        return self._metabolites.copy()
+
+    @property
+    def genes(self):
+        """The genes associated with the reaction, as a frozenset."""
+        return frozenset(self._genes)
+
+    @property
+    def gene_reaction_rule(self):
+        """The boolean gene-protein-reaction (GPR) rule string."""
+        return self._gene_reaction_rule
+
+    @gene_reaction_rule.setter
+    def gene_reaction_rule(self, new_rule):
+
+        # TODO: Do this :)
+        if get_context(self):
+            warn("Context management not implemented for "
+                 "gene reaction rules")
+
+        self._gene_reaction_rule = new_rule.strip()
+        try:
+            _, gene_names = parse_gpr(self._gene_reaction_rule)
+        except (SyntaxError, TypeError) as e:
+            # Try to recover from uppercase boolean operators by rewriting
+            # the rule and re-entering this setter once.
+            if "AND" in new_rule or "OR" in new_rule:
+                warn("uppercase AND/OR found in rule '%s' for '%s'" %
+                     (new_rule, repr(self)))
+                new_rule = uppercase_AND.sub("and", new_rule)
+                new_rule = uppercase_OR.sub("or", new_rule)
+                self.gene_reaction_rule = new_rule
+                return
+            # Fall back to a crude split that at least extracts gene names
+            # from an otherwise unparsable rule.
+            warn("malformed gene_reaction_rule '%s' for %s" %
+                 (new_rule, repr(self)))
+            tmp_str = and_or_search.sub('', self._gene_reaction_rule)
+            gene_names = set((gpr_clean.sub(' ', tmp_str).split(' ')))
+        if '' in gene_names:
+            gene_names.remove('')
+        old_genes = self._genes
+        if self._model is None:
+            self._genes = {Gene(i) for i in gene_names}
+        else:
+            # Reuse existing model genes; create and register new ones.
+            model_genes = self._model.genes
+            self._genes = set()
+            for id in gene_names:
+                if model_genes.has_id(id):
+                    self._genes.add(model_genes.get_by_id(id))
+                else:
+                    new_gene = Gene(id)
+                    new_gene._model = self._model
+                    self._genes.add(new_gene)
+                    model_genes.append(new_gene)
+
+        # Make the genes aware that it is involved in this reaction
+        for g in self._genes:
+            g._reaction.add(self)
+
+        # make the old genes aware they are no longer involved in this reaction
+        for g in old_genes:
+            if g not in self._genes:  # if an old gene is not a new gene
+                try:
+                    g._reaction.remove(self)
+                # NOTE(review): bare except silently swallows all errors here.
+                except:
+                    warn("could not remove old gene %s from reaction %s" %
+                         (g.id, self.id))
+
+    @property
+    def gene_name_reaction_rule(self):
+        """Display gene_reaction_rule with names instead.
+
+        Do NOT use this string for computation. It is intended to give a
+        representation of the rule using more familiar gene names instead of
+        the often cryptic ids.
+
+        """
+        names = {i.id: i.name for i in self._genes}
+        ast = parse_gpr(self._gene_reaction_rule)[0]
+        return ast2str(ast, names=names)
+
+    @property
+    def functional(self):
+        """All required enzymes for reaction are functional.
+
+        Returns
+        -------
+        bool
+            True if the gene-protein-reaction (GPR) rule is fulfilled for
+            this reaction, or if reaction is not associated to a model,
+            otherwise False.
+        """
+        if self._model:
+            tree, _ = parse_gpr(self.gene_reaction_rule)
+            # Evaluate the GPR with the set of non-functional gene ids
+            # treated as knocked out.
+            return eval_gpr(tree, {gene.id for gene in self.genes if
+                                   not gene.functional})
+        return True
+
+    @property
+    def x(self):
+        """The flux through the reaction in the most recent solution.
+
+        Flux values are computed from the primal values of the variables in
+        the solution.
+
+        Deprecated; kept for backwards compatibility only.
+        """
+        warn("Please use reaction.flux instead.", DeprecationWarning)
+        return self.flux
+
+    @property
+    def y(self):
+        """The reduced cost of the reaction in the most recent solution.
+
+        Reduced costs are computed from the dual values of the variables in
+        the solution.
+
+        Deprecated; kept for backwards compatibility only.
+        """
+        warn("Please use reaction.reduced_cost instead.", DeprecationWarning)
+        return self.reduced_cost
+
+    @property
+    def reversibility(self):
+        """Whether the reaction can proceed in both directions (reversible)
+
+        This is computed from the current upper and lower bounds.
+
+        """
+        return self._lower_bound < 0 < self._upper_bound
+
+    @reversibility.setter
+    def reversibility(self, value):
+        # Reversibility is derived from the bounds; assigning it is a no-op.
+        warn("Setting reaction reversibility is ignored")
+
+    @property
+    def boundary(self):
+        """Whether or not this reaction is an exchange reaction.
+
+        Returns `True` if the reaction has either no products or reactants.
+        """
+        # A boundary reaction has a single metabolite that is only consumed
+        # or only produced.
+        return (len(self.metabolites) == 1 and
+                not (self.reactants and self.products))
+
+    @property
+    def model(self):
+        """returns the model the reaction is a part of"""
+        return self._model
+
+    def _update_awareness(self):
+        """Make sure all metabolites and genes that are associated with
+        this reaction are aware of it.
+
+        """
+        # Register this reaction in each associated object's back-reference
+        # set so membership lookups stay consistent.
+        for x in self._metabolites:
+            x._reaction.add(self)
+        for x in self._genes:
+            x._reaction.add(self)
+
+    def remove_from_model(self, remove_orphans=False):
+        """Removes the reaction from a model.
+
+        This removes all associations between a reaction the associated
+        model, metabolites and genes.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        remove_orphans : bool
+            Remove orphaned genes and metabolites from the model as well
+
+        """
+        self._model.remove_reactions([self], remove_orphans=remove_orphans)
+
+    def delete(self, remove_orphans=False):
+        """Removes the reaction from a model.
+
+        This removes all associations between a reaction the associated
+        model, metabolites and genes.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Deprecated, use `reaction.remove_from_model` instead.
+
+        Parameters
+        ----------
+        remove_orphans : bool
+            Remove orphaned genes and metabolites from the model as well
+
+        """
+        warn("delete is deprecated. Use reaction.remove_from_model instead",
+             DeprecationWarning)
+        self.remove_from_model(remove_orphans=remove_orphans)
+
+    def __setstate__(self, state):
+        """Probably not necessary to set _model as the cobra.Model that
+        contains self sets the _model attribute for all metabolites and genes
+        in the reaction.
+
+        However, to increase performance speed we do want to let the metabolite
+        and gene know that they are employed in this reaction
+
+        """
+        # These are necessary for old pickles which store attributes
+        # which have since been superceded by properties.
+        if "reaction" in state:
+            state.pop("reaction")
+        if "gene_reaction_rule" in state:
+            state["_gene_reaction_rule"] = state.pop("gene_reaction_rule")
+        if "lower_bound" in state:
+            state['_lower_bound'] = state.pop('lower_bound')
+        if "upper_bound" in state:
+            state['_upper_bound'] = state.pop('upper_bound')
+
+        self.__dict__.update(state)
+        for x in state['_metabolites']:
+            setattr(x, '_model', self._model)
+            x._reaction.add(self)
+        for x in state['_genes']:
+            setattr(x, '_model', self._model)
+            x._reaction.add(self)
+
+    def copy(self):
+        """Copy a reaction
+
+        The referenced metabolites and genes are also copied.
+
+        """
+        # no references to model when copying
+        model = self._model
+        self._model = None
+        for i in self._metabolites:
+            i._model = None
+        for i in self._genes:
+            i._model = None
+        # now we can copy
+        new_reaction = deepcopy(self)
+        # restore the references
+        self._model = model
+        for i in self._metabolites:
+            i._model = model
+        for i in self._genes:
+            i._model = model
+        return new_reaction
+
+    def __add__(self, other):
+        """Add two reactions
+
+        The stoichiometry will be the combined stoichiometry of the two
+        reactions, and the gene reaction rule will be both rules combined by an
+        and. All other attributes (i.e. reaction bounds) will match those of
+        the first reaction
+
+        """
+        new_reaction = self.copy()
+        new_reaction += other
+        return new_reaction
+
+    def __iadd__(self, other):
+        self.add_metabolites(other._metabolites, combine=True)
+        gpr1 = self.gene_reaction_rule.strip()
+        gpr2 = other.gene_reaction_rule.strip()
+        if gpr1 != '' and gpr2 != '':
+            self.gene_reaction_rule = "(%s) and (%s)" % \
+                                      (self.gene_reaction_rule,
+                                       other.gene_reaction_rule)
+        elif gpr1 != '' and gpr2 == '':
+            self.gene_reaction_rule = gpr1
+        elif gpr1 == '' and gpr2 != '':
+            self.gene_reaction_rule = gpr2
+        return self
+
+    def __sub__(self, other):
+        new = self.copy()
+        new -= other
+        return new
+
+    def __isub__(self, other):
+        self.subtract_metabolites(other._metabolites, combine=True)
+        return self
+
+    def __imul__(self, coefficient):
+        """Scale coefficients in a reaction by a given value
+
+        E.g. A -> B becomes 2A -> 2B
+        """
+        self._metabolites = {
+            met: value * coefficient
+            for met, value in iteritems(self._metabolites)}
+        return self
+
+    def __mul__(self, coefficient):
+        new = self.copy()
+        new *= coefficient
+        return new
+
+    @property
+    def reactants(self):
+        """Return a list of reactants for the reaction."""
+        return [k for k, v in iteritems(self._metabolites) if v < 0]
+
+    @property
+    def products(self):
+        """Return a list of products for the reaction"""
+        return [k for k, v in iteritems(self._metabolites) if v >= 0]
+
+    def get_coefficient(self, metabolite_id):
+        """
+        Return the stoichiometric coefficient of a metabolite.
+
+        Parameters
+        ----------
+        metabolite_id : str or cobra.Metabolite
+
+        """
+        if isinstance(metabolite_id, Metabolite):
+            return self._metabolites[metabolite_id]
+
+        _id_to_metabolites = {m.id: m for m in self._metabolites}
+        return self._metabolites[_id_to_metabolites[metabolite_id]]
+
+    def get_coefficients(self, metabolite_ids):
+        """Return the stoichiometric coefficients for a list of
+        metabolites in the reaction.
+
+        Parameters
+        ----------
+        metabolite_ids : iterable
+            Containing ``str`` or ``cobra.Metabolite``s.
+
+        """
+        return map(self.get_coefficient, metabolite_ids)
+
    def add_metabolites(self, metabolites_to_add, combine=True,
                        reversibly=True):
        """Add metabolites and stoichiometric coefficients to the reaction.
        If the final coefficient for a metabolite is 0 then it is removed
        from the reaction.

        The change is reverted upon exit when using the model as a context.

        Parameters
        ----------
        metabolites_to_add : dict
            Dictionary with metabolite objects or metabolite identifiers as
            keys and coefficients as values. If keys are strings (name of a
            metabolite) the reaction must already be part of a model and a
            metabolite with the given name must exist in the model.

        combine : bool
            Describes the behavior when a metabolite already exists in the
            reaction.
            True causes the coefficients to be added.
            False causes the coefficient to be replaced.

        reversibly : bool
            Whether to add the change to the context to make the change
            reversibly or not (primarily intended for internal use).

        """
        # Snapshot of the current stoichiometry; used both to update the
        # solver constraints and to build the rollback registered below.
        old_coefficients = self.metabolites
        new_metabolites = []
        _id_to_metabolites = dict([(x.id, x) for x in self._metabolites])

        for metabolite, coefficient in iteritems(metabolites_to_add):
            # str() of a Metabolite yields its id, so this accepts both
            # Metabolite objects and plain identifier strings as keys.
            met_id = str(metabolite)
            # If a metabolite already exists in the reaction then
            # just add them.
            if met_id in _id_to_metabolites:
                reaction_metabolite = _id_to_metabolites[met_id]
                if combine:
                    self._metabolites[reaction_metabolite] += coefficient
                else:
                    self._metabolites[reaction_metabolite] = coefficient
            else:
                # If the reaction is in a model, ensure we aren't using
                # a duplicate metabolite.
                if self._model:
                    try:
                        metabolite = \
                            self._model.metabolites.get_by_id(met_id)
                    except KeyError as e:
                        # Unknown to the model: a Metabolite object will be
                        # added to the model further below; a bare string
                        # cannot be resolved, so re-raise.
                        if isinstance(metabolite, Metabolite):
                            new_metabolites.append(metabolite)
                        else:
                            # do we want to handle creation here?
                            raise e
                elif isinstance(metabolite, string_types):
                    # if we want to handle creation, this should be changed
                    raise ValueError("Reaction '%s' does not belong to a "
                                     "model. Either add the reaction to a "
                                     "model or use Metabolite objects instead "
                                     "of strings as keys."
                                     % self.id)
                self._metabolites[metabolite] = coefficient
                # make the metabolite aware that it is involved in this
                # reaction
                metabolite._reaction.add(self)

        # A net coefficient of zero means the metabolite no longer takes
        # part in the reaction; drop it and its back-reference.
        for metabolite, the_coefficient in list(self._metabolites.items()):
            if the_coefficient == 0:
                # make the metabolite aware that it no longer participates
                # in this reaction
                metabolite._reaction.remove(self)
                self._metabolites.pop(metabolite)

        # from cameo ...
        # Keep the optlang solver representation in sync: register any
        # brand-new metabolites with the model and update the constraint
        # coefficients of the forward/reverse flux variables.
        model = self.model
        if model is not None:
            model.add_metabolites(new_metabolites)

            for metabolite, coefficient in metabolites_to_add.items():

                if isinstance(metabolite,
                              str):  # support metabolites added as strings.
                    metabolite = model.metabolites.get_by_id(metabolite)
                if combine:
                    try:
                        old_coefficient = old_coefficients[metabolite]
                    except KeyError:
                        pass
                    else:
                        coefficient = coefficient + old_coefficient

                model.constraints[
                    metabolite.id].set_linear_coefficients(
                    {self.forward_variable: coefficient,
                     self.reverse_variable: -coefficient
                     })

        # Register the inverse operation so the change can be rolled back
        # when the model is used as a context manager.
        context = get_context(self)
        if context and reversibly:
            if combine:
                # Just subtract the metabolites that were added
                context(partial(
                    self.subtract_metabolites, metabolites_to_add,
                    combine=True, reversibly=False))
            else:
                # Reset them with add_metabolites
                mets_to_reset = {
                    key: old_coefficients[model.metabolites.get_by_any(key)[0]]
                    for key in iterkeys(metabolites_to_add)}

                context(partial(
                    self.add_metabolites, mets_to_reset,
                    combine=False, reversibly=False))
+
+    def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
+        """This function will 'subtract' metabolites from a reaction, which
+        means add the metabolites with -1*coefficient. If the final coefficient
+        for a metabolite is 0 then the metabolite is removed from the reaction.
+
+        The change is reverted upon exit when using the model as a context.
+
+        Parameters
+        ----------
+        metabolites : dict
+            Dictionary where the keys are of class Metabolite and the values
+            are the coefficients. These metabolites will be added to the
+            reaction.
+
+        combine : bool
+            Describes behavior a metabolite already exists in the reaction.
+            True causes the coefficients to be added.
+            False causes the coefficient to be replaced.
+
+        reversibly : bool
+            Whether to add the change to the context to make the change
+            reversibly or not (primarily intended for internal use).
+
+        .. note:: A final coefficient < 0 implies a reactant.
+
+        """
+        self.add_metabolites({
+            k: -v for k, v in iteritems(metabolites)},
+            combine=combine, reversibly=reversibly)
+
    @property
    def reaction(self):
        """Human readable reaction string (e.g. ``a + b --> c``)."""
        return self.build_reaction_string()

    @reaction.setter
    def reaction(self, value):
        # Parsing the string rebuilds the stoichiometry and bounds in place.
        return self.build_reaction_from_string(value)
+
+    def build_reaction_string(self, use_metabolite_names=False):
+        """Generate a human readable reaction string"""
+
+        def format(number):
+            return "" if number == 1 else str(number).rstrip(".") + " "
+
+        id_type = 'id'
+        if use_metabolite_names:
+            id_type = 'name'
+        reactant_bits = []
+        product_bits = []
+        for met in sorted(self._metabolites, key=attrgetter("id")):
+            coefficient = self._metabolites[met]
+            name = str(getattr(met, id_type))
+            if coefficient >= 0:
+                product_bits.append(format(coefficient) + name)
+            else:
+                reactant_bits.append(format(abs(coefficient)) + name)
+
+        reaction_string = ' + '.join(reactant_bits)
+        if not self.reversibility:
+            if self.lower_bound < 0 and self.upper_bound <= 0:
+                reaction_string += ' <-- '
+            else:
+                reaction_string += ' --> '
+        else:
+            reaction_string += ' <=> '
+        reaction_string += ' + '.join(product_bits)
+        return reaction_string
+
+    def check_mass_balance(self):
+        """Compute mass and charge balance for the reaction
+
+        returns a dict of {element: amount} for unbalanced elements.
+        "charge" is treated as an element in this dict
+        This should be empty for balanced reactions.
+        """
+        reaction_element_dict = defaultdict(int)
+        for metabolite, coefficient in iteritems(self._metabolites):
+            if metabolite.charge is not None:
+                reaction_element_dict["charge"] += \
+                    coefficient * metabolite.charge
+            if metabolite.elements is None:
+                raise ValueError("No elements found in metabolite %s"
+                                 % metabolite.id)
+            for element, amount in iteritems(metabolite.elements):
+                reaction_element_dict[element] += coefficient * amount
+        # filter out 0 values
+        return {k: v for k, v in iteritems(reaction_element_dict) if v != 0}
+
+    @property
+    def compartments(self):
+        """lists compartments the metabolites are in"""
+        if self._compartments is None:
+            self._compartments = {met.compartment for met in self._metabolites
+                                  if met.compartment is not None}
+        return self._compartments
+
+    def get_compartments(self):
+        """lists compartments the metabolites are in"""
+        return list(self.compartments)
+
+    def _associate_gene(self, cobra_gene):
+        """Associates a cobra.Gene object with a cobra.Reaction.
+
+        Parameters
+        ----------
+        cobra_gene : cobra.core.Gene.Gene
+
+        """
+        self._genes.add(cobra_gene)
+        cobra_gene._reaction.add(self)
+        cobra_gene._model = self._model
+
+    def _dissociate_gene(self, cobra_gene):
+        """Dissociates a cobra.Gene object with a cobra.Reaction.
+
+        Parameters
+        ----------
+        cobra_gene : cobra.core.Gene.Gene
+
+        """
+        self._genes.discard(cobra_gene)
+        cobra_gene._reaction.discard(self)
+
+    def knock_out(self):
+        """Knockout reaction by setting its bounds to zero."""
+        self.bounds = (0, 0)
+
    def build_reaction_from_string(self, reaction_str, verbose=True,
                                   fwd_arrow=None, rev_arrow=None,
                                   reversible_arrow=None, term_split="+"):
        """Builds reaction from reaction equation reaction_str using parser

        Takes a string and using the specifications supplied in the optional
        arguments infers a set of metabolites, metabolite compartments and
        stoichiometries for the reaction.  It also infers the reversibility
        of the reaction from the reaction arrow.

        Changes to the associated model are reverted upon exit when using
        the model as a context.

        Parameters
        ----------
        reaction_str : string
            a string containing a reaction formula (equation)
        verbose: bool
            setting verbosity of function
        fwd_arrow : re.compile
            for forward irreversible reaction arrows
        rev_arrow : re.compile
            for backward irreversible reaction arrows
        reversible_arrow : re.compile
            for reversible reaction arrows
        term_split : string
            dividing individual metabolite entries

        """
        # set the arrows: custom arrow strings are escaped and compiled;
        # otherwise the module-level default patterns are used.
        forward_arrow_finder = _forward_arrow_finder if fwd_arrow is None \
            else re.compile(re.escape(fwd_arrow))
        reverse_arrow_finder = _reverse_arrow_finder if rev_arrow is None \
            else re.compile(re.escape(rev_arrow))
        reversible_arrow_finder = _reversible_arrow_finder \
            if reversible_arrow is None \
            else re.compile(re.escape(reversible_arrow))
        # NOTE(review): with no model, unknown metabolites below will hit
        # `model.metabolites` on None and raise AttributeError.
        if self._model is None:
            warn("no model found")
            model = None
        else:
            model = self._model
        # A single trailing compartment marker applies to every
        # metabolite in the equation and is stripped from the string.
        found_compartments = compartment_finder.findall(reaction_str)
        if len(found_compartments) == 1:
            compartment = found_compartments[0]
            reaction_str = compartment_finder.sub("", reaction_str)
        else:
            compartment = ""

        # reversible case; the magnitude 1000 is the conventional
        # "effectively unbounded" flux limit used here.
        arrow_match = reversible_arrow_finder.search(reaction_str)
        if arrow_match is not None:
            self.lower_bound = -1000
            self.upper_bound = 1000
        else:  # irreversible
            # try forward
            arrow_match = forward_arrow_finder.search(reaction_str)
            if arrow_match is not None:
                self.upper_bound = 1000
                self.lower_bound = 0
            else:
                # must be reverse
                arrow_match = reverse_arrow_finder.search(reaction_str)
                if arrow_match is None:
                    raise ValueError("no suitable arrow found in '%s'" %
                                     reaction_str)
                else:
                    self.upper_bound = 0
                    self.lower_bound = -1000
        # Everything left of the arrow is consumed, everything right of
        # it is produced.
        reactant_str = reaction_str[:arrow_match.start()].strip()
        product_str = reaction_str[arrow_match.end():].strip()

        # Clear the current stoichiometry before applying the parsed one.
        self.subtract_metabolites(self.metabolites, combine=True)

        for substr, factor in ((reactant_str, -1), (product_str, 1)):
            if len(substr) == 0:
                continue
            for term in substr.split(term_split):
                term = term.strip()
                # The literal "nothing" marks an intentionally empty side.
                if term.lower() == "nothing":
                    continue
                # "<coefficient> <id>" terms; a bare id has coefficient 1.
                if " " in term:
                    num_str, met_id = term.split()
                    num = float(num_str.lstrip("(").rstrip(")")) * factor
                else:
                    met_id = term
                    num = factor
                met_id += compartment
                try:
                    met = model.metabolites.get_by_id(met_id)
                except KeyError:
                    if verbose:
                        print("unknown metabolite '%s' created" % met_id)
                    met = Metabolite(met_id)
                self.add_metabolites({met: num})
+
    def __str__(self):
        # Render as "<id>: <stoichiometry>", e.g. "PGI: g6p_c <=> f6p_c".
        return "{id}: {stoichiometry}".format(
            id=self.id, stoichiometry=self.build_reaction_string())
+
+    def _repr_html_(self):
+        return """
+        <table>
+            <tr>
+                <td><strong>Reaction identifier</strong></td><td>{id}</td>
+            </tr><tr>
+                <td><strong>Name</strong></td><td>{name}</td>
+            </tr><tr>
+                <td><strong>Memory address</strong></td>
+                <td>{address}</td>
+            </tr><tr>
+                <td><strong>Stoichiometry</strong></td>
+                <td>
+                    <p style='text-align:right'>{stoich_id}</p>
+                    <p style='text-align:right'>{stoich_name}</p>
+                </td>
+            </tr><tr>
+                <td><strong>GPR</strong></td><td>{gpr}</td>
+            </tr><tr>
+                <td><strong>Lower bound</strong></td><td>{lb}</td>
+            </tr><tr>
+                <td><strong>Upper bound</strong></td><td>{ub}</td>
+            </tr>
+        </table>
+        """.format(id=self.id, name=self.name,
+                   address='0x0%x' % id(self),
+                   stoich_id=self.build_reaction_string(),
+                   stoich_name=self.build_reaction_string(True),
+                   gpr=self.gene_reaction_rule,
+                   lb=self.lower_bound, ub=self.upper_bound)
+
+
def separate_forward_and_reverse_bounds(lower_bound, upper_bound):
    """Split a flux interval into reverse and forward variable bounds.

    The interval [lower_bound, upper_bound] is cut at zero.  The
    negative part is negated (so both returned ranges are positive) and
    assigned to the reverse variable; the positive part goes to the
    forward variable.

    Parameters
    ----------
    lower_bound : float
        The lower flux bound
    upper_bound : float
        The upper flux bound

    Returns
    -------
    tuple
        ``(reverse_lb, reverse_ub, forward_lb, forward_ub)``
    """

    assert lower_bound <= upper_bound, "lower bound is greater than upper"

    # Clamp each bound against zero instead of sorting the four values.
    reverse_lb = -min(upper_bound, 0)
    reverse_ub = -min(lower_bound, 0)
    forward_lb = max(lower_bound, 0)
    forward_ub = max(upper_bound, 0)
    return reverse_lb, reverse_ub, forward_lb, forward_ub
+
+
def update_forward_and_reverse_bounds(reaction, direction='both'):
    """For the given reaction, update the bounds in the forward and
    reverse variable bounds.

    Parameters
    ----------
    reaction : cobra.Reaction
       The reaction to operate on
    direction : string
       Either 'both', 'upper' or 'lower' for updating the corresponding flux
       bounds.
    """

    reverse_lb, reverse_ub, forward_lb, forward_ub = \
        separate_forward_and_reverse_bounds(*reaction.bounds)

    try:
        # Clear the original bounds to avoid complaints
        # (presumably optlang's lb <= ub validation could otherwise
        # reject an intermediate state -- TODO confirm).
        if direction == 'both':
            reaction.forward_variable._ub = None
            reaction.reverse_variable._lb = None
            reaction.reverse_variable._ub = None
            reaction.forward_variable._lb = None

            reaction.forward_variable.set_bounds(lb=forward_lb, ub=forward_ub)
            reaction.reverse_variable.set_bounds(lb=reverse_lb, ub=reverse_ub)

        elif direction == 'upper':
            reaction.forward_variable.ub = forward_ub
            reaction.reverse_variable.lb = reverse_lb

        elif direction == 'lower':
            reaction.reverse_variable.ub = reverse_ub
            reaction.forward_variable.lb = forward_lb

    except AttributeError:
        # NOTE(review): silently ignored -- apparently covers reactions
        # that have no solver variables yet (e.g. not attached to a
        # model); verify that no other AttributeError can be masked here.
        pass
diff --git a/cobra/core/solution.py b/cobra/core/solution.py
new file mode 100644
index 0000000..e730565
--- /dev/null
+++ b/cobra/core/solution.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+"""Provide unified interfaces to optimization solutions."""
+
+from __future__ import absolute_import
+
+import logging
+from builtins import object, super
+from warnings import warn
+
+from numpy import empty, nan
+from optlang.interface import OPTIMAL
+from pandas import Series, DataFrame
+
+from cobra.util.solver import check_solver_status
+
+__all__ = ("Solution", "LegacySolution", "get_solution")
+
+LOGGER = logging.getLogger(__name__)
+
+
class Solution(object):
    """
    Unified access to the outcome of a `cobra.Model` optimization.

    Notes
    -----
    Instances are normally produced by `get_solution`; refer to that
    function for the full construction details.

    Attributes
    ----------
    objective_value : float
        Optimal value of the objective function.
    status : str
        Status string reported by the solver.
    fluxes : pandas.Series
        Reaction fluxes (primal values of variables).
    reduced_costs : pandas.Series
        Reaction reduced costs (dual values of variables).
    shadow_prices : pandas.Series
        Metabolite shadow prices (dual values of constraints).

    Deprecated Attributes
    ---------------------
    f : float
        Use `objective_value` instead.
    x : list
        Use `fluxes.values` instead.
    x_dict : pandas.Series
        Use `fluxes` instead.
    y : list
        Use `reduced_costs.values` instead.
    y_dict : pandas.Series
        Use `reduced_costs` instead.
    """

    def __init__(self, objective_value, status, fluxes, reduced_costs=None,
                 shadow_prices=None, **kwargs):
        """
        Build a `Solution` directly from its parts.

        Parameters
        ----------
        objective_value : float
            Optimal value of the objective function.
        status : str
            Status string reported by the solver.
        fluxes : pandas.Series
            Reaction fluxes (primal values of variables).
        reduced_costs : pandas.Series
            Reaction reduced costs (dual values of variables).
        shadow_prices : pandas.Series
            Metabolite shadow prices (dual values of constraints).
        """
        super(Solution, self).__init__(**kwargs)
        self.objective_value = objective_value
        self.status = status
        self.fluxes = fluxes
        self.reduced_costs = reduced_costs
        self.shadow_prices = shadow_prices

    def __repr__(self):
        """Short textual summary: objective value, or status if not optimal."""
        if self.status == OPTIMAL:
            return "<Solution {0:.3f} at 0x{1:x}>".format(self.objective_value,
                                                          id(self))
        return "<Solution {0:s} at 0x{1:x}>".format(self.status, id(self))

    def _repr_html_(self):
        """HTML summary for Jupyter; includes a frame when optimal."""
        if self.status != OPTIMAL:
            return '<strong><em>{}</em> solution</strong>'.format(self.status)
        return ('<strong><em>Optimal</em> solution with objective value '
                '{:.3f}</strong><br>{}'
                .format(self.objective_value,
                        self.to_frame()._repr_html_()))

    def __dir__(self):
        """Hide the deprecated accessors from tab completion."""
        deprecated = {'f', 'x', 'y', 'x_dict', 'y_dict'}
        visible = set(dir(type(self))) | set(self.__dict__)
        return sorted(visible - deprecated)

    def __getitem__(self, reaction_id):
        """
        Return the flux of a single reaction.

        Parameters
        ----------
        reaction_id : str
            A model reaction ID.
        """
        return self.fluxes[reaction_id]

    get_primal_by_id = __getitem__

    @property
    def f(self):
        """Deprecated: the objective value; use `objective_value`."""
        warn("use solution.objective_value instead", DeprecationWarning)
        return self.objective_value

    @property
    def x_dict(self):
        """Deprecated: fluxes by reaction; use `fluxes`."""
        warn("use solution.fluxes instead", DeprecationWarning)
        return self.fluxes

    @x_dict.setter
    def x_dict(self, fluxes):
        """Deprecated setter kept for backwards compatibility."""
        warn("let Model.optimize create a solution instance,"
             " don't update yourself", DeprecationWarning)
        self.fluxes = fluxes

    @property
    def x(self):
        """Deprecated: array of flux values; use `fluxes.values`."""
        warn("use solution.fluxes.values() instead", DeprecationWarning)
        return self.fluxes.values

    @property
    def y_dict(self):
        """Deprecated: reduced costs by reaction; use `reduced_costs`."""
        warn("use solution.reduced_costs instead", DeprecationWarning)
        return self.reduced_costs

    @y_dict.setter
    def y_dict(self, costs):
        """Deprecated setter kept for backwards compatibility."""
        warn("let Model create a solution instance, don't update yourself",
             DeprecationWarning)
        self.reduced_costs = costs

    @property
    def y(self):
        """Deprecated: array of reduced costs; use `reduced_costs.values`."""
        warn("use solution.reduced_costs.values() instead", DeprecationWarning)
        return self.reduced_costs.values

    def to_frame(self):
        """Return a DataFrame with flux and reduced-cost columns."""
        return DataFrame({'fluxes': self.fluxes,
                          'reduced_costs': self.reduced_costs})
+
+
class LegacySolution(object):
    """
    Legacy container mirroring the old solver solution interface.

    Attributes
    ----------
    f : float
        The objective value
    solver : str
        A string indicating which solver package was used.
    x : iterable
        List or Array of the fluxes (primal values).
    x_dict : dict
        A dictionary of reaction IDs that maps to the respective primal values.
    y : iterable
        List or Array of the dual values.
    y_dict : dict
        A dictionary of reaction IDs that maps to the respective dual values.

    Warning
    -------
    The LegacySolution class and its interface is deprecated.
    """

    def __init__(self, f, x=None, x_dict=None, y=None, y_dict=None,
                 solver=None, the_time=0, status='NA', **kwargs):
        """
        Create a `LegacySolution` around an objective value.

        Parameters
        ----------
        f : float
            Objective value.
        solver : str, optional
            Name of the solver package that produced the solution.
        x : iterable, optional
            Primal values (fluxes).
        x_dict : dict, optional
            Primal values keyed by reaction ID.
        y : iterable, optional
            Dual values.
        y_dict : dict, optional
            Dual values keyed by reaction ID.
        the_time : int, optional
            Unused; retained for interface compatibility.
        status : str, optional
            Solver status string.

        .. warning :: deprecated
        """
        super(LegacySolution, self).__init__(**kwargs)
        self.solver = solver
        self.f = f
        self.x = x
        self.x_dict = x_dict
        self.status = status
        self.y = y
        self.y_dict = y_dict

    def __repr__(self):
        """Short textual summary: objective value, or status if not optimal."""
        if self.status == "optimal":
            return "<LegacySolution {0:.3f} at 0x{1:x}>".format(
                self.f, id(self))
        return "<LegacySolution {0:s} at 0x{1:x}>".format(
            self.status, id(self))

    def __getitem__(self, reaction_id):
        """
        Return the primal value (flux) recorded for a reaction.

        Parameters
        ----------
        reaction_id : str
            A reaction ID.
        """
        return self.x_dict[reaction_id]

    def dress_results(self, model):
        """
        Historical no-op retained for interface compatibility.

        .. warning :: deprecated
        """
        warn("unnecessary to call this deprecated function",
             DeprecationWarning)
+
+
def get_solution(model, reactions=None, metabolites=None, raise_error=False):
    """
    Snapshot the solver's current state as a `Solution` object.

    Parameters
    ---------
    model : cobra.Model
        The model whose solver state should be captured.
    reactions : list, optional
        `cobra.Reaction` objects to report on; defaults to
        `model.reactions`.
    metabolites : list, optional
        `cobra.Metabolite` objects to report on; defaults to
        `model.metabolites`.
    raise_error : bool
        If true, raise an OptimizationError if solver status is not optimal.

    Returns
    -------
    cobra.Solution

    Note
    ----
    This is only intended for the `optlang` solver interfaces and not the
    legacy solvers.
    """
    check_solver_status(model.solver.status, raise_error=raise_error)
    if reactions is None:
        reactions = model.reactions
    if metabolites is None:
        metabolites = model.metabolites

    primals = model.solver.primal_values
    rxn_index = [rxn.id for rxn in reactions]
    met_index = [met.id for met in metabolites]
    fluxes = empty(len(reactions))
    reduced = empty(len(reactions))
    shadow = empty(len(metabolites))
    # Net flux is the difference between the forward and reverse
    # variables of each reaction.
    if model.solver.is_integer:
        # MILP solutions carry no dual information.
        reduced.fill(nan)
        shadow.fill(nan)
        for i, rxn in enumerate(reactions):
            fluxes[i] = primals[rxn.id] - primals[rxn.reverse_id]
    else:
        var_duals = model.solver.reduced_costs
        for i, rxn in enumerate(reactions):
            fluxes[i] = primals[rxn.id] - primals[rxn.reverse_id]
            reduced[i] = var_duals[rxn.id] - var_duals[rxn.reverse_id]
        constr_duals = model.solver.shadow_prices
        for i, met in enumerate(metabolites):
            shadow[i] = constr_duals[met.id]
    return Solution(model.solver.objective.value, model.solver.status,
                    Series(index=rxn_index, data=fluxes, name="fluxes"),
                    Series(index=rxn_index, data=reduced,
                           name="reduced_costs"),
                    Series(index=met_index, data=shadow, name="shadow_prices"))
diff --git a/cobra/core/Species.py b/cobra/core/species.py
similarity index 77%
rename from cobra/core/Species.py
rename to cobra/core/species.py
index fc51cdd..0aa3a0b 100644
--- a/cobra/core/Species.py
+++ b/cobra/core/species.py
@@ -1,22 +1,25 @@
-from warnings import warn
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 from copy import deepcopy
-from .Object import Object
+
+from cobra.core.object import Object
 
 
 class Species(Object):
     """Species is a class for holding information regarding
     a chemical Species
 
-
+    Parameters
+    ----------
+    id : string
+       An identifier for the chemical species
+    name : string
+       A human readable name.
     """
 
     def __init__(self, id=None, name=None):
-        """
-        id: A string.
-
-        name: String.  A human readable name.
-
-        """
         Object.__init__(self, id, name)
         self._model = None
         # references to reactions that operate on this species
@@ -41,7 +44,7 @@ class Species(Object):
 
         Additionally, a copy of a reaction is no longer in a cobra.Model.
 
-        This should be fixed with self.__deecopy__ if possible
+        This should be fixed with self.__deepcopy__ if possible
         """
         return deepcopy(self)
 
diff --git a/cobra/design/__init__.py b/cobra/design/__init__.py
index fa6a8c1..ef21432 100644
--- a/cobra/design/__init__.py
+++ b/cobra/design/__init__.py
@@ -1 +1,5 @@
-from .design_algorithms import *
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from cobra.design.design_algorithms import *
diff --git a/cobra/design/design_algorithms.py b/cobra/design/design_algorithms.py
index 6dd3828..c3d1165 100644
--- a/cobra/design/design_algorithms.py
+++ b/cobra/design/design_algorithms.py
@@ -1,429 +1,17 @@
-from ..core import Model, Reaction, Metabolite
-from ..manipulation.modify import canonical_form
+# -*- coding: utf-8 -*-
 
-from six import iteritems
-from copy import deepcopy
+from __future__ import absolute_import
 
+from cobra.exceptions import DefunctError
 
-def _add_decision_variable(model, reaction_id):
-    """Add an integer decision variable for the given reaction."""
-    reaction = model.reactions.get_by_id(reaction_id)
-    # add integer variable
-    var = Reaction("%s_decision_var" % reaction_id)
-    var.lower_bound = 0
-    var.upper_bound = 1
-    var.variable_kind = "integer"
-    var.decision_reaction_id = reaction_id
-    model.add_reaction(var)
-    # add constraints
-    # v <= ub * y  -->  v - ub * y <= 0
-    ub_constr = Metabolite("%s_upper_bound" % var.id)
-    ub_constr._constraint_sense = "L"
-    # v >= lb * y  -->  - v + lb * y <= 0
-    lb_constr = Metabolite("%s_lower_bound" % var.id)
-    lb_constr._constraint_sense = "L"
-    reaction.add_metabolites({lb_constr: - 1,
-                              ub_constr:   1})
-    var.add_metabolites({lb_constr:   reaction.lower_bound,
-                         ub_constr: - reaction.upper_bound})
-    return var
+__all__ = ("set_up_optknock", "dual_problem")
 
 
-def set_up_optknock(model, chemical_objective, knockable_reactions,
-                    biomass_objective=None, n_knockouts=5,
-                    n_knockouts_required=True, dual_maximum=1000, copy=True):
-    """Set up the OptKnock problem described by Burgard et al., 2003:
+def set_up_optknock(*args, **kwargs):
+    raise DefunctError('set_up_optknock',
+                       'cameo.strain_design.OptKnock',
+                       'https://github.com/biosustain/cameo')
 
-    Burgard AP, Pharkya P, Maranas CD. Optknock: a bilevel programming
-    framework for identifying gene knockout strategies for microbial strain
-    optimization. Biotechnol Bioeng. 2003;84(6):647-57.
-    https://doi.org/10.1002/bit.10803.
 
-    Arguments
-    ---------
-
-    model: :class:`~cobra.core.Model`
-        A COBRA model.
-
-    chemical_objective: str
-        The ID of the reaction to maximize in the outer problem.
-
-    knockable_reactions: [str]
-        A list of reaction IDs that can be knocked out.
-
-    biomass_objective: str
-        The ID of the reaction to maximize in the inner problem. By default,
-        this is the existing objective function in the passed model.
-
-    n_knockouts: int
-        The number of knockouts allowable.
-
-    n_knockouts_required: bool
-        Require exactly the number of knockouts specified by n_knockouts.
-
-    dual_maximum: float or int
-        The upper bound for dual variables.
-
-    copy: bool
-        Copy the model before making any modifications.
-
-
-    Zachary King 2015
-
-    """
-
-    if copy:
-        model = model.copy()
-
-    # add the integer decision variables
-    decision_variable_ids = [_add_decision_variable(model, r_id).id
-                             for r_id in knockable_reactions]
-
-    # inner problem
-    inner_problem = model.copy()
-    if biomass_objective:
-        found = False
-        for reaction in inner_problem.reactions:
-            obj = reaction.id == biomass_objective
-            reaction.objective_coefficient = 1 if obj else 0
-            if obj:
-                found = True
-        if not found:
-            raise Exception("Could not find biomass_objective %s in model" %
-                            biomass_objective)
-
-    # dual of inner problem
-    inner_dual = dual_problem(inner_problem,
-                              integer_vars_to_maintain=decision_variable_ids,
-                              already_irreversible=False, copy=False,
-                              dual_maximum=dual_maximum)
-
-    # add constraints and variables from inner problem to outer problem
-    inner_objectives = {}
-    for reaction in inner_dual.reactions:
-        inner_objectives[reaction.id] = reaction.objective_coefficient
-        reaction.objective_coefficient = 0
-        if reaction.id in model.reactions:
-            existing_reaction = model.reactions.get_by_id(reaction.id)
-            for met, coeff in iteritems(reaction._metabolites):
-                if met.id in model.metabolites:
-                    existing_reaction.add_metabolites(
-                        {model.metabolites.get_by_id(met.id): coeff})
-                else:
-                    existing_reaction.add_metabolites({deepcopy(met): coeff})
-        else:
-            model.add_reaction(reaction)
-
-    # constraint to set outer and inner objectives equal, and set chemical
-    # objective
-    equal_objectives_constr = Metabolite("equal_objectives_constraint")
-    equal_objectives_constr._constraint_sense = "E"
-    equal_objectives_constr._bound = 0
-    for reaction in model.reactions:
-        if reaction.objective_coefficient != 0:
-            reaction.add_metabolites({equal_objectives_constr:
-                                      reaction.objective_coefficient})
-        inner_objective = inner_objectives.get(reaction.id, 0)
-        if inner_objective:
-            reaction.add_metabolites(
-                {equal_objectives_constr: - inner_objective})
-        # set chemical objective
-        reaction.objective_coefficient = 1 \
-            if reaction.id == chemical_objective else 0
-
-    # add the n_knockouts constraint
-    n_knockouts_constr = Metabolite("n_knockouts_constraint")
-    n_knockouts_constr._constraint_sense = "E" if n_knockouts_required else "G"
-    n_knockouts_constr._bound = len(decision_variable_ids) - n_knockouts
-    for r_id in decision_variable_ids:
-        reaction = model.reactions.get_by_id(r_id)
-        reaction.add_metabolites({n_knockouts_constr: 1})
-
-    return model
-
-
-def run_optknock(optknock_problem, solver=None, tolerance_integer=1e-9,
-                 **kwargs):
-    """Run the OptKnock problem created with set_up_optknock.
-
-    Arguments
-    ---------
-
-    optknock_problem: :class:`~cobra.core.Model`
-        The problem generated by set_up_optknock.
-
-    solver: str
-        The name of the preferred solver.
-
-    tolerance_integer: float
-        The integer tolerance for the MILP.
-
-    **kwargs
-        Keyword arguments are passed to Model.optimize().
-
-
-    Zachary King 2015
-
-    """
-    solution = optknock_problem.optimize(solver=solver,
-                                         tolerance_integer=tolerance_integer,
-                                         **kwargs)
-    solution.knockouts = []
-    for reaction in optknock_problem.reactions:
-        if solution.x_dict.get(reaction.id, None) == 0:
-            r_id = getattr(reaction, "decision_reaction_id", None)
-            if r_id is not None:
-                solution.knockouts.append(r_id)
-    return solution
-
-
-# This function will generalize the set_up_optknock code to other MILPs:
-# def dual_embed(outer_model, inner_model, ..., objective_sense="maximize",
-#                integer_vars_to_maintain=[], already_irreversible=False,
-#                copy=True, dual_maximum=1000):
-#     """Embed the dual of the inner model within the outer model"""
-
-
-def dual_problem(model, objective_sense="maximize",
-                 integer_vars_to_maintain=[],
-                 already_irreversible=False, copy=True, dual_maximum=1000):
-    """Return a new model representing the dual of the model.
-
-    Make the problem irreversible, then take the dual. Convert the problem:
-
-    .. code-block:: none
-
-        Maximize (c^T)x subject to Ax <= b, x >= 0
-
-    which is something like this in COBRApy:
-
-    .. code-block:: none
-
-        Maximize sum(objective_coefficient_j * reaction_j for all j)
-            s.t.
-            sum(coefficient_i_j * reaction_j for all j) <= metabolite_bound_i
-            reaction_j <= upper_bound_j
-            reaction_j >= 0
-
-    to the problem:
-
-    .. code-block:: none
-
-        Minimize (b^T)w subject to (A^T)w >= c, w >= 0
-
-    which is something like this in COBRApy (S matrix is m x n):
-
-    .. code-block:: none
-
-        Minimize sum( metabolite_bound_i * dual_i   for all i ) +
-                 sum( upper_bound_j *      dual_m+j for all j ) +
-            s.t.
-             sum( coefficient_i_j * dual_i for all i ) +
-             sum( dual_2m+j' for all j' ) >= objective_coefficient_j
-            dual_k >= 0
-
-
-    Arguments
-    ---------
-
-    model : :class:`~cobra.core.Model`
-        The COBRA model.
-
-    objective_sense: str
-        The objective sense of the starting problem, either 'maximize' or
-        'minimize'. A minimization problems will be converted to a maximization
-        before taking the dual. This function always returns a minimization
-        problem.
-
-    iteger_vars_to_maintain: [str]
-        A list of IDs for Boolean integer variables to be maintained in the
-        dual problem. See 'Maintaining integer variables' below for more
-        details.
-
-    already_irreversible: bool
-        If True, then do not convert the model to irreversible.
-
-    copy: bool
-        If True, then make a copy of the model before modifying it. This is not
-        necessary if already_irreversible is True.
-
-    dual_maximum: float or int
-        The upper bound for dual variables.
-
-
-    **Maintaining integer variables**
-
-    The argument ``integer_vars_to_maintain`` can be used to specify certin
-    Boolean integer variables that will be maintained in the dual problem. This
-    makes it possible to join outer and inner problems in a bi-level MILP. The
-    method for maintaining integer variables is described by Tepper and Shlomi,
-    2010:
-
-    Tepper N, Shlomi T. Predicting metabolic engineering knockout strategies
-    for chemical production: accounting for competing pathways. Bioinformatics.
-    2010;26(4):536-43. https://doi.org/10.1093/bioinformatics/btp704.
-
-    In COBRApy, this roughly translates to transforming (decision variables p,
-    integer constraints o):
-
-    .. code-block:: none
-
-        Maximize (c^T)x subject to (A_x)x + (A_y)y <= b, x >= 0
-
-        (1) Maximize sum(objective_coefficient_j * reaction_j for all j)
-                s.t.
-        (2)     sum(coeff_i_j * reaction_j for all j) +
-                sum(decision_coeff_i_j * decision_var_j for all j)
-                <= metabolite_bound_i
-        (3)     reaction_j <= upper_bound_j
-        (4)     reaction_j >= 0
-
-    to the problem:
-
-    .. code-block:: none
-
-        Minimize (b - (A_y)y)^T w subject to (A_x^T)w >= c, w >= 0
-
-    which linearizes to (with auxiliary variables z):
-
-    .. code-block:: none
-
-        Minimize (b^T)w - { ((A_y)y)^T w with yw --> z }
-        subject to (A_x^T)w >= c, linearization constraints, w >= 0
-          Linearization constraints: z <= w_max * y, z <= w,
-                                     z >= w - w_max * (1 - y), z >= 0
-
-        (5) Minimize sum( metabolite_bound_i *  dual_i            for all i ) +
-                      sum( upper_bound_j *      dual_m+j          for all j ) +
-                    - sum( decision_coeff_i_j * auxiliary_var_i_j
-                          for all combinations i, j )
-                s.t.
-        (6)   - sum( coefficient_i_j * dual_i for all i ) - dual_m+j
-              <= - objective_coefficient_j
-        (7)     auxiliary_var_i_j - dual_maximum * decision_var_j          <= 0
-        (8)     auxiliary_var_i_j - dual_i                                 <= 0
-        (9)   - auxiliary_var_i_j + dual_i + dual_maximum * decision_var_j
-              <= dual_maximum
-       (10)     dual_maximum >= dual_i            >= 0
-       (11)     dual_maximum >= dual_m+j          >= 0
-       (12)     dual_maximum >= auxiliary_var_i_j >= 0
-       (13)                1 >= decision_var_j    >= 0
-
-
-    Zachary King 2015
-
-    """
-
-    # convert to canonical form and copy
-    model = canonical_form(model, objective_sense=objective_sense,
-                           already_irreversible=already_irreversible,
-                           copy=copy)
-
-    # new model for the dual
-    dual = Model("%s_dual" % model.id)
-
-    # keep track of dual_i
-    dual_var_for_met = {}
-
-    # add dual variables for constraints. (2) --> dual_i
-    for metabolite in model.metabolites:
-        # add constraints based on metabolite constraint sense
-        if metabolite._constraint_sense != "L":
-            raise Exception("Not a less than or equal constraint: %s"
-                            % metabolite.id)
-
-        var = Reaction("%s__dual" % metabolite.id)
-        # Without auxiliary variables, the objective coefficient would include
-        # integer variables when present. However, we will separate out the
-        # integer parts into objective coefficients for auxiliary variables.
-        var.objective_coefficient = metabolite._bound  # (5)
-        # [dual_vars] >= 0
-        var.lower_bound = 0
-        var.upper_bound = dual_maximum
-        dual.add_reaction(var)
-        # remember
-        dual_var_for_met[metabolite.id] = var
-
-    # keep track of decision variables (integer_vars_to_maintain) as tuples:
-    # (reaction in dual problem, reaction in original problem)
-    integer_vars_added = []
-
-    # add constraints and upper bound variables
-    for reaction in model.reactions:
-        # integer vars to maintain
-        if reaction.id in integer_vars_to_maintain:
-            # keep these integer variables in the dual, with new transformed
-            # constraints
-            if (reaction.lower_bound not in [0, 1] or
-                    reaction.upper_bound not in [0, 1] or
-                    reaction.variable_kind != "integer"):
-                raise Exception("Reaction %s from integer_vars_to_maintain is "
-                                "not a Boolean integer variable" % reaction.id)
-            integer_var = Reaction(reaction.id)
-            integer_var.upper_bound = reaction.upper_bound
-            integer_var.lower_bound = reaction.lower_bound
-            integer_var.variable_kind = reaction.variable_kind
-            integer_var.objective_coefficient = 0
-            # constraints
-            dual.add_reaction(integer_var)
-            integer_vars_added.append((integer_var, reaction))
-
-        # other vars
-        else:
-            # other variables become constraints, (1) to (6)
-            constr = Metabolite("%s__dual_constrained_by_c" %
-                                reaction.id)  # (6)
-            constr._constraint_sense = "L"
-            constr._bound = - reaction.objective_coefficient
-            for met, coeff in iteritems(reaction._metabolites):
-                dual_var = dual_var_for_met[met.id]
-                dual_var.add_metabolites({constr: - coeff})
-
-            # upper bound constraints -> variables (3) to (5) and (6)
-            var_bound = Reaction("%s__dual_for_upper_bound_constraint" %
-                                 reaction.id)  # dual_m+j
-            var_bound.objective_coefficient = reaction.upper_bound  # (5)
-            # [dual_vars] >= 0
-            var_bound.lower_bound = 0
-            var_bound.upper_bound = dual_maximum
-            # add bound dual variables to dual constraints
-            var_bound.add_metabolites({constr: -1})  # (6)
-            dual.add_reaction(var_bound)
-
-    # add auxiliary variables
-    for integer_var, original_reaction in integer_vars_added:
-        for metabolite, coeff in iteritems(original_reaction._metabolites):
-            dual_var = dual_var_for_met[metabolite.id]
-            # create an auxiliary variable
-            aux_var = Reaction("%s__auxiliary__%s" % (integer_var.id,
-                                                      dual_var.id))
-            aux_var.lower_bound = 0
-            aux_var.upper_bound = dual_maximum
-            aux_var.objective_coefficient = - coeff
-            dual.add_reaction(aux_var)
-
-            # add linearization constraints
-            # (7)     auxiliary_var_i_j - dual_maximum * decision_var_j    <= 0
-            le_decision_constr = Metabolite("%s__le_decision" % aux_var.id)
-            le_decision_constr._constraint_sense = "L"
-            le_decision_constr._bound = 0
-            aux_var.add_metabolites({le_decision_constr: 1})
-            integer_var.add_metabolites({le_decision_constr: - dual_maximum})
-
-            # (8)     auxiliary_var_i_j - dual_i                           <= 0
-            le_dual_constr = Metabolite("%s__le_dual" % aux_var.id)
-            le_dual_constr._constraint_sense = "L"
-            le_dual_constr._bound = 0
-            aux_var.add_metabolites({le_dual_constr: 1})
-            dual_var.add_metabolites({le_dual_constr: -1})
-
-            # (9)   - auxiliary_var_i_j + dual_i +
-            #         dual_maximum * decision_var_j <= dual_maximum
-            g_constr = Metabolite("%s__g_dual" % aux_var.id)
-            g_constr._constraint_sense = "L"
-            g_constr._bound = dual_maximum
-            aux_var.add_metabolites({g_constr: -1})
-            dual_var.add_metabolites({g_constr: 1})
-            integer_var.add_metabolites({g_constr: dual_maximum})
-
-    return dual
+def dual_problem(*args, **kwargs):
+    raise DefunctError('dual_problem')
diff --git a/cobra/exceptions.py b/cobra/exceptions.py
new file mode 100644
index 0000000..b277aef
--- /dev/null
+++ b/cobra/exceptions.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, print_function
+
+import optlang.interface
+
+
+class OptimizationError(Exception):
+    def __init__(self, message):
+        super(OptimizationError, self).__init__(message)
+
+
+class Infeasible(OptimizationError):
+    pass
+
+
+class Unbounded(OptimizationError):
+    pass
+
+
+class FeasibleButNotOptimal(OptimizationError):
+    pass
+
+
+class UndefinedSolution(OptimizationError):
+    pass
+
+
+OPTLANG_TO_EXCEPTIONS_DICT = dict((
+    (optlang.interface.INFEASIBLE, Infeasible),
+    (optlang.interface.UNBOUNDED, Unbounded),
+    (optlang.interface.FEASIBLE, FeasibleButNotOptimal),
+    (optlang.interface.UNDEFINED, UndefinedSolution)))
+
+
+class DefunctError(Exception):
+    """Exception for retired functionality
+
+    Parameters
+    ----------
+    what : string
+        The name of the retired object
+    alternative : string
+        Suggestion for an alternative
+    url : string
+        A url to alternative resource
+    """
+
+    def __init__(self, what, alternative=None, url=None):
+        message = "{} has been removed from cobrapy".format(what)
+        if alternative is None:
+            message += (" without replacement. Raise an issue at "
+                        "https://github.com/opencobra/cobrapy if you miss it.")
+        if alternative is not None:
+            message += ". Consider using '{}' instead".format(alternative)
+        if url is not None:
+            message += " [{}]".format(url)
+        super(DefunctError, self).__init__(message)
diff --git a/cobra/flux_analysis/__init__.py b/cobra/flux_analysis/__init__.py
index df132cd..3f6b8d7 100644
--- a/cobra/flux_analysis/__init__.py
+++ b/cobra/flux_analysis/__init__.py
@@ -1,20 +1,21 @@
-try:
-    import numpy
-except:
-    numpy = None
+# -*- coding: utf-8 -*-
 
-from .essentiality import assess_medium_component_essentiality
-from .variability import flux_variability_analysis, find_blocked_reactions
-from .single_deletion import single_gene_deletion, single_reaction_deletion
-from .parsimonious import optimize_minimal_flux
-from .loopless import construct_loopless_model
-from .gapfilling import growMatch
+try:
+    import scipy
+except ImportError:
+    scipy = None
 
-if numpy:
-    from .double_deletion import double_reaction_deletion, double_gene_deletion
-    from .phenotype_phase_plane import calculate_phenotype_phase_plane
-else:
-    from warnings import warn
-    warn("double deletions and phase planes requires numpy")
-    del warn
-del numpy
+from cobra.flux_analysis.gapfilling import gapfill, growMatch
+from cobra.flux_analysis.loopless import (
+    construct_loopless_model, loopless_solution, add_loopless)
+from cobra.flux_analysis.parsimonious import pfba
+from cobra.flux_analysis.single_deletion import (
+    single_gene_deletion, single_reaction_deletion)
+from cobra.flux_analysis.variability import (
+    find_blocked_reactions, flux_variability_analysis, find_essential_genes,
+    find_essential_reactions)
+from cobra.flux_analysis.double_deletion import (
+    double_reaction_deletion, double_gene_deletion)
+from cobra.flux_analysis.phenotype_phase_plane import (
+    calculate_phenotype_phase_plane, production_envelope)
+from cobra.flux_analysis.sampling import sample
diff --git a/cobra/flux_analysis/deletion_worker.py b/cobra/flux_analysis/deletion_worker.py
index 359e7f2..96cbe6e 100644
--- a/cobra/flux_analysis/deletion_worker.py
+++ b/cobra/flux_analysis/deletion_worker.py
@@ -1,8 +1,13 @@
-from multiprocessing import Queue, Process, cpu_count
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from multiprocessing import Process, Queue, cpu_count
 
-from ..solvers import get_solver_name, solver_dict
 from six import iteritems
 
+from cobra.solvers import get_solver_name, solver_dict
+
 
 def compute_fba_deletion_worker(cobra_model, solver, job_queue, output_queue,
                                 **kwargs):
diff --git a/cobra/flux_analysis/double_deletion.py b/cobra/flux_analysis/double_deletion.py
index 945ad7e..0b21f6a 100644
--- a/cobra/flux_analysis/double_deletion.py
+++ b/cobra/flux_analysis/double_deletion.py
@@ -1,13 +1,19 @@
-from warnings import warn
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 from itertools import chain, product
+from warnings import warn
 
-from six import iteritems
 import numpy
+from pandas import DataFrame
+from six import iteritems
 
-from ..solvers import get_solver_name, solver_dict
-from ..manipulation.delete import (find_gene_knockout_reactions,
-                                   get_compiled_gene_reaction_rules)
-from .deletion_worker import CobraDeletionPool, CobraDeletionMockPool
+from cobra.flux_analysis.deletion_worker import (
+    CobraDeletionMockPool, CobraDeletionPool)
+from cobra.manipulation.delete import (
+    find_gene_knockout_reactions, get_compiled_gene_reaction_rules)
+from cobra.solvers import get_solver_name, solver_dict
 
 try:
     import scipy
@@ -16,11 +22,6 @@ except ImportError:
 else:
     from . import moma
 
-try:
-    from pandas import DataFrame
-except:
-    DataFrame = None
-
 
 # Utility functions
 def generate_matrix_indexes(ids1, ids2):
@@ -89,11 +90,10 @@ def format_results_frame(row_ids, column_ids, matrix, return_frame=False):
 
     Otherwise returns a dict of
     {"x": row_ids, "y": column_ids", "data": result_matrx}"""
-    if return_frame and DataFrame:
+    if return_frame:
         return DataFrame(data=matrix, index=row_ids, columns=column_ids)
-    elif return_frame and not DataFrame:
-        warn("could not import pandas.DataFrame")
-    return {"x": row_ids, "y": column_ids, "data": matrix}
+    else:
+        return {"x": row_ids, "y": column_ids, "data": matrix}
 
 
 def double_deletion(cobra_model, element_list_1=None, element_list_2=None,
diff --git a/cobra/flux_analysis/essentiality.py b/cobra/flux_analysis/essentiality.py
deleted file mode 100644
index be46440..0000000
--- a/cobra/flux_analysis/essentiality.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from warnings import warn
-try:
-    from cobra.flux_analysis.moma import moma
-except:
-    warn("moma does not appear to be functional on your system")
-from cobra.manipulation import initialize_growth_medium
-
-
-def assess_medium_component_essentiality(cobra_model, the_components=None,
-                                         the_medium=None,
-                                         medium_compartment='e', solver='glpk',
-                                         the_condition=None, method='fba'):
-    """Determines which components in an in silico medium are essential for
-    growth in the context of the remaining components.
-
-    cobra_model: A Model object.
-
-    the_components: None or a list of external boundary reactions that will be
-    sequentially disabled.
-
-    the_medium: Is None, a string, or a dictionary.  If a string then the
-    initialize_growth_medium function expects that the_model has an attribute
-    dictionary called media_compositions, which is a dictionary of dictionaries
-    for various medium compositions.  Where a medium composition is a
-    dictionary of external boundary reaction ids for the medium components and
-    the external boundary fluxes for each medium component.
-
-    medium_compartment: the compartment in which the boundary reactions
-    supplying the medium components exist
-
-    NOTE: that these fluxes must be negative because the convention is
-    backwards means something is feed into the system.
-
-    solver: 'glpk', 'gurobi', or 'cplex'
-
-    returns: essentiality_dict:  A dictionary providing the maximum growth rate
-    accessible when the respective component is removed from the medium.
-
-    """
-    if method.lower() == 'moma':
-        wt_model = cobra_model.copy()
-    cobra_model = cobra_model.copy()
-
-    if isinstance(the_medium, str):
-        try:
-            the_medium = cobra_model.media_compositions[the_medium]
-        except:
-            raise Exception(
-                the_medium + " is not in cobra_model.media_compositions")
-    if the_medium is not None:
-        initialize_growth_medium(cobra_model, the_medium, medium_compartment)
-        if the_components is None:
-            the_components = the_medium.keys()
-    if not the_components:
-        raise Exception("You need to specify the_components or the_medium")
-    essentiality_dict = {}
-    for the_component in the_components:
-        the_reaction = cobra_model.reactions.get_by_id(the_component)
-        original_lower_bound = float(the_reaction.lower_bound)
-        the_reaction.lower_bound = 0.
-        if method.lower() == 'fba':
-            cobra_model.optimize(solver=solver)
-            objective_value = cobra_model.solution.f
-        elif method.lower() == 'moma':
-            objective_value = moma(wt_model, cobra_model, solver=solver)[
-                'objective_value']
-        essentiality_dict[the_component] = objective_value
-        the_reaction.lower_bound = original_lower_bound
-
-    return(essentiality_dict)
diff --git a/cobra/flux_analysis/gapfilling.py b/cobra/flux_analysis/gapfilling.py
index 7f52b4f..ba5841c 100644
--- a/cobra/flux_analysis/gapfilling.py
+++ b/cobra/flux_analysis/gapfilling.py
@@ -1,166 +1,330 @@
-from __future__ import print_function
+# -*- coding: utf-8 -*-
 
-from ..core import Model, Reaction, Metabolite
-from ..solvers import get_solver_name
-from ..manipulation import modify
+from __future__ import absolute_import
 
+from sympy import Add
+from warnings import warn
 
-class SUXModelMILP(Model):
-    """Model with additional Universal and Exchange reactions.
-    Adds corresponding dummy reactions and dummy metabolites for each added
-    reaction which are used to impose MILP constraints to minimize the
-    total number of added reactions. See the figure for more
-    information on the structure of the matrix.
-    """
+from optlang.interface import OPTIMAL
+from cobra.core import Model
+from cobra.util import fix_objective_as_constraint
+
+
+class GapFiller(object):
+    """Class for performing gap filling.
+
+    This class implements gap filling based on a mixed-integer approach,
+    very similar to that described in [1]_ and the 'no-growth but growth'
+    part of [2]_ but with minor adjustments. In short, we add indicator
+    variables for using the reactions in the universal model, z_i and then
+    solve problem
+
+    minimize \sum_i c_i * z_i
+    s.t. Sv = 0
+         v_o >= t
+         lb_i <= v_i <= ub_i
+         v_i = 0 if z_i = 0
+
+    where lb, ub are the upper, lower flux bounds for reaction i, c_i is a
+    cost parameter and the objective v_o is greater than the lower bound t.
+    The default costs are 1 for reactions from the universal model, 100 for
+    exchange (uptake) reactions added and 1 for added demand reactions.
+
+    Note that this is a mixed-integer linear program and as such will
+    expensive to solve for large models. Consider using alternatives [3]_
+    such as CORDA instead [4,5]_.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to perform gap filling on.
+    universal : cobra.Model
+        A universal model with reactions that can be used to complete the
+        model.
+    lower_bound : float
+        The minimally accepted flux for the objective in the filled model.
+    penalties : dict, None
+        A dictionary with keys being 'universal' (all reactions included in
+        the universal model), 'exchange' and 'demand' (all additionally
+        added exchange and demand reactions) for the three reaction types.
+        Can also have reaction identifiers for reaction specific costs.
+        Defaults are 1, 100 and 1 respectively.
+    integer_threshold : float
+        The threshold at which a value is considered non-zero (aka
+        integrality threshold). If gapfilled models fail to validate,
+        you may want to lower this value.
+    exchange_reactions : bool
+        Consider adding exchange (uptake) reactions for all metabolites
+        in the model.
+    demand_reactions : bool
+        Consider adding demand reactions for all metabolites.
+
+    References
+    ----------
+    .. [1] Reed, Jennifer L., Trina R. Patel, Keri H. Chen, Andrew R. Joyce,
+       Margaret K. Applebee, Christopher D. Herring, Olivia T. Bui, Eric M.
+       Knight, Stephen S. Fong, and Bernhard O. Palsson. “Systems Approach
+       to Refining Genome Annotation.” Proceedings of the National Academy
+       of Sciences 103, no. 46 (2006): 17480–17484.
+
+       [2] Kumar, Vinay Satish, and Costas D. Maranas. “GrowMatch: An
+       Automated Method for Reconciling In Silico/In Vivo Growth
+       Predictions.” Edited by Christos A. Ouzounis. PLoS Computational
+       Biology 5, no. 3 (March 13, 2009): e1000308.
+       doi:10.1371/journal.pcbi.1000308.
+
+       [3] http://opencobra.github.io/cobrapy/tags/gapfilling/
 
-    def __init__(self, model, Universal=None, threshold=.05,
-                 penalties=None, dm_rxns=True, ex_rxns=False):
-        Model.__init__(self, "")
-        # store parameters
-        self.threshold = threshold
-        if penalties is None:
-            self.penalties = {"Universal": 1, "Exchange": 100, "Demand": 1}
-        else:
-            self.penalties = penalties
-        # want to only operate on a copy of Universal so as not to mess up
-        # is this necessary?
-        if Universal is None:
-            Universal = Model("Universal_Reactions")
-        else:
-            Universal = Universal.copy()
-
-        modify.convert_to_irreversible(Universal)
-
-        for rxn in Universal.reactions:
-            rxn.notes["gapfilling_type"] = "Universal"
-
-        # SUX += Exchange (when exchange generator has been written)
-        # For now, adding exchange reactions to Universal - could add to a new
-        # model called exchange and allow their addition or not....
-        if ex_rxns:
-            for m in model.metabolites:
-                rxn = Reaction('SMILEY_EX_' + m.id)
-                rxn.lower_bound = 0
-                rxn.upper_bound = 1000
-                rxn.add_metabolites({m: 1.0})
-                rxn.notes["gapfilling_type"] = "Exchange"
-                Universal.add_reaction(rxn)
-
-        if dm_rxns:
-            # ADD DEMAND REACTIONS FOR ALL METABOLITES TO UNIVERSAL MODEL
-            for m in model.metabolites:
-                rxn = Reaction('SMILEY_DM_' + m.id)
-                rxn.lower_bound = 0
-                rxn.upper_bound = 1000
-                rxn.add_metabolites({m: -1.0})
-                rxn.notes["gapfilling_type"] = "Demand"
-                Universal.add_reaction(rxn)
-
-        Model.add_reactions(self, model.copy().reactions)
-        Model.add_reactions(self, Universal.reactions)
-
-        # all reactions with an index < len(model.reactions) were original
-        self.original_reactions = self.reactions[:len(model.reactions)]
-        self.added_reactions = self.reactions[len(model.reactions):]
-
-        # Add MILP indicator reactions
-        indicators = []
-        for reaction in self.added_reactions:
-            dummy_metabolite = Metabolite("dummy_met_" + reaction.id)
-            dummy_metabolite._constraint_sense = "L"
-            reaction.add_metabolites({dummy_metabolite: 1})
-            indicator_reaction = Reaction("indicator_" + reaction.id)
-            indicator_reaction.add_metabolites(
-                {dummy_metabolite: -1 * reaction.upper_bound})
-            indicator_reaction.lower_bound = 0
-            indicator_reaction.upper_bound = 1
-            indicator_reaction.variable_kind = "integer"
-            indicator_reaction.objective_coefficient = \
-                self.penalties[reaction.notes["gapfilling_type"]]
-            indicators.append(indicator_reaction)
-        Model.add_reactions(self, indicators)
-
-        # original reaction objectives need to be set to lower bounds
-        self._update_objectives()
-
-    def _update_objectives(self, added=True):
-        """Update the metabolite which encodes the objective function
-        with the objective coefficients for the reaction, and impose
-        penalties for added reactions.
+       [4] Schultz, André, and Amina A. Qutub. “Reconstruction of
+       Tissue-Specific Metabolic Networks Using CORDA.” Edited by Costas D.
+       Maranas. PLOS Computational Biology 12, no. 3 (March 4, 2016):
+       e1004808. doi:10.1371/journal.pcbi.1004808.
+
+       [5] Diener, Christian https://github.com/cdiener/corda
+     """
+
+    def __init__(self, model, universal=None, lower_bound=0.05,
+                 penalties=None, exchange_reactions=False,
+                 demand_reactions=True, integer_threshold=1e-6):
+        self.original_model = model
+        self.lower_bound = lower_bound
+        self.model = model.copy()
+        tolerances = self.model.solver.configuration.tolerances
+        tolerances.integrality = integer_threshold
+        self.universal = universal.copy() if universal else Model('universal')
+        self.penalties = dict(universal=1, exchange=100, demand=1)
+        if penalties is not None:
+            self.penalties.update(penalties)
+        self.integer_threshold = integer_threshold
+        self.indicators = list()
+        self.costs = dict()
+        self.extend_model(exchange_reactions, demand_reactions)
+        fix_objective_as_constraint(self.model, bound=lower_bound)
+        self.add_switches_and_objective()
+
+    def extend_model(self, exchange_reactions=False, demand_reactions=True):
+        """Extend gapfilling model.
+
+        Add reactions from universal model and optionally exchange and
+        demand reactions for all metabolites in the model to perform
+        gapfilling on.
+
+        Parameters
+        ----------
+        exchange_reactions : bool
+            Consider adding exchange (uptake) reactions for all metabolites
+            in the model.
+        demand_reactions : bool
+            Consider adding demand reactions for all metabolites.
         """
-        for reaction in self.original_reactions:
-            if reaction.objective_coefficient > 0:
-                reaction.lower_bound = max(
-                    reaction.lower_bound,
-                    reaction.objective_coefficient * self.threshold)
-            reaction.objective_coefficient = 0
-
-    def add_reactions(self, reactions):
-        Model.add_reactions(self, reactions)
-        self.original_reactions.extend(reactions)
-        self._update_objectives()
-
-    def solve(self, solver=None, iterations=1, debug=False, time_limit=100,
-              **solver_parameters):
-        """solve the MILP problem"""
-        if solver is None:
-            solver = get_solver_name(mip=True)
-        used_reactions = [None] * iterations
-        numeric_error_cutoff = 0.0001
-        self._update_objectives()
-        for i in range(iterations):
-            used_reactions[i] = []
-            self.optimize(objective_sense="minimize",
-                          solver=solver, **solver_parameters)
-            if debug:
-                print("Iteration %d: Status is %s" % (i, self.solution.status))
-            for reaction in self.added_reactions:
-                # The dummy reaction should have a flux of either 0 or 1.
-                # If it is 1 (nonzero), then the reaction was used in
-                # the solution.
-                ind = self.reactions.get_by_id("indicator_" + reaction.id)
-                if ind.x > numeric_error_cutoff:
-                    used_reactions[i].append(reaction)
-                    ind.objective_coefficient += \
-                        self.penalties[reaction.notes["gapfilling_type"]]
-                    if debug:
-                        print('    ', reaction, reaction.objective_coefficient)
+        for rxn in self.universal.reactions:
+            rxn.gapfilling_type = 'universal'
+        for met in self.model.metabolites:
+            if exchange_reactions:
+                rxn = self.universal.add_boundary(
+                    met, type='exchange_smiley', lb=-1000, ub=0,
+                    reaction_id='EX_{}'.format(met.id))
+                rxn.gapfilling_type = 'exchange'
+            if demand_reactions:
+                rxn = self.universal.add_boundary(
+                    met, type='demand_smiley', lb=0, ub=1000,
+                    reaction_id='DM_{}'.format(met.id))
+                rxn.gapfilling_type = 'demand'
+
+        new_reactions = self.universal.reactions.query(
+            lambda reaction: reaction not in self.model.reactions
+        )
+        self.model.add_reactions(new_reactions)
+
+    def update_costs(self):
+        """Update the coefficients for the indicator variables in the objective.
+
+        Done incrementally so that second time the function is called,
+        active indicators in the current solution get a higher cost than the
+        unused indicators.
+        """
+        for var in self.indicators:
+            if var not in self.costs:
+                self.costs[var] = var.cost
+            else:
+                if var._get_primal() > self.integer_threshold:
+                    self.costs[var] += var.cost
+        self.model.objective.set_linear_coefficients(self.costs)
+
+    def add_switches_and_objective(self):
+        """ Update gapfilling model with switches and the indicator objective.
+        """
+        constraints = list()
+        big_m = max(max(abs(b) for b in r.bounds)
+                    for r in self.model.reactions)
+        prob = self.model.problem
+        for rxn in self.model.reactions:
+            if not hasattr(rxn, 'gapfilling_type') or rxn.id.startswith('DM_'):
+                continue
+            indicator = prob.Variable(
+                name='indicator_{}'.format(rxn.id), lb=0, ub=1, type='binary')
+            if rxn.id in self.penalties:
+                indicator.cost = self.penalties[rxn.id]
+            else:
+                indicator.cost = self.penalties[rxn.gapfilling_type]
+            indicator.rxn_id = rxn.id
+            self.indicators.append(indicator)
+
+            # if z = 1 v_i is allowed non-zero
+            # v_i - Mz <= 0   and   v_i + Mz >= 0
+            constraint_lb = prob.Constraint(
+                rxn.flux_expression - big_m * indicator, ub=0,
+                name='constraint_lb_{}'.format(rxn.id), sloppy=True)
+            constraint_ub = prob.Constraint(
+                rxn.flux_expression + big_m * indicator, lb=0,
+                name='constraint_ub_{}'.format(rxn.id), sloppy=True)
+
+            constraints.extend([constraint_lb, constraint_ub])
+
+        self.model.add_cons_vars(self.indicators)
+        self.model.add_cons_vars(constraints, sloppy=True)
+        self.model.objective = prob.Objective(
+            Add(*self.indicators), direction='min')
+        self.update_costs()
+
+    def fill(self, iterations=1):
+        """Perform the gapfilling by iteratively solving the model, updating
+        the costs and recording the used reactions.
+
 
+        Parameters
+        ----------
+        iterations : int
+            The number of rounds of gapfilling to perform. For every
+            iteration, the penalty for every used reaction increases
+            linearly. This way, the algorithm is encouraged to search for
+            alternative solutions which may include previously used
+            reactions. I.e., with enough iterations pathways including 10
+            steps will eventually be reported even if the shortest pathway
+            is a single reaction.
+
+        Returns
+        -------
+        iterable
+            A list of lists where each element is a list of reactions that were
+            used to gapfill the model.
+
+        Raises
+        ------
+        RuntimeError
+            If the model fails to be validated (i.e. the original model with
+            the proposed reactions added, still cannot get the required flux
+            through the objective).
+        """
+        used_reactions = list()
+        for i in range(iterations):
+            self.model.slim_optimize(error_value=None,
+                                     message='gapfilling optimization failed')
+            solution = [self.model.reactions.get_by_id(ind.rxn_id)
+                        for ind in self.indicators if
+                        ind._get_primal() > self.integer_threshold]
+            if not self.validate(solution):
+                raise RuntimeError('failed to validate gapfilled model, '
+                                   'try lowering the integer_threshold')
+            used_reactions.append(solution)
+            self.update_costs()
         return used_reactions
 
+    def validate(self, reactions):
+        with self.original_model as model:
+            model.add_reactions(reactions)
+            model.slim_optimize()
+            return (model.solver.status == OPTIMAL and
+                    model.solver.objective.value >= self.lower_bound)
+
+
+def gapfill(model, universal=None, lower_bound=0.05,
+            penalties=None, demand_reactions=True, exchange_reactions=False,
+            iterations=1):
+    """Perform gapfilling on a model.
+
+    See documentation for the class GapFiller.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to perform gap filling on.
+    universal : cobra.Model, None
+        A universal model with reactions that can be used to complete the
+        model. Only gapfill considering demand and exchange reactions if
+        left missing.
+    lower_bound : float
+        The minimally accepted flux for the objective in the filled model.
+    penalties : dict, None
+        A dictionary with keys being 'universal' (all reactions included in
+        the universal model), 'exchange' and 'demand' (all additionally
+        added exchange and demand reactions) for the three reaction types.
+        Can also have reaction identifiers for reaction specific costs.
+        Defaults are 1, 100 and 1 respectively.
+    iterations : int
+        The number of rounds of gapfilling to perform. For every iteration,
+        the penalty for every used reaction increases linearly. This way,
+        the algorithm is encouraged to search for alternative solutions
+        which may include previously used reactions. I.e., with enough
+        iterations pathways including 10 steps will eventually be reported
+        even if the shortest pathway is a single reaction.
+    exchange_reactions : bool
+        Consider adding exchange (uptake) reactions for all metabolites
+        in the model.
+    demand_reactions : bool
+        Consider adding demand reactions for all metabolites.
+
+    Returns
+    -------
+    iterable
+        list of lists with one set of reactions that completes the model per
+        requested iteration.
+
+    Examples
+    --------
+    >>> import cobra.test as ct
+    >>> from cobra import Model
+    >>> from cobra.flux_analysis import gapfill
+    >>> model = ct.create_test_model("salmonella")
+    >>> universal = Model('universal')
+    >>> universal.add_reactions(model.reactions.GF6PTA.copy())
+    >>> model.remove_reactions([model.reactions.GF6PTA])
+    >>> gapfill(model, universal)
+    """
+    gapfiller = GapFiller(model, universal=universal,
+                          lower_bound=lower_bound, penalties=penalties,
+                          demand_reactions=demand_reactions,
+                          exchange_reactions=exchange_reactions)
+    return gapfiller.fill(iterations=iterations)
+
 
 def growMatch(model, Universal, dm_rxns=False, ex_rxns=False,
-              penalties=None, **solver_parameters):
-    """runs growMatch"""
-    SUX = SUXModelMILP(model, Universal, dm_rxns=dm_rxns, ex_rxns=ex_rxns,
-                       penalties=penalties)
-    return SUX.solve(**solver_parameters)
+              penalties=None, iterations=1, **solver_parameters):
+    """runs (partial implementation of) growMatch. Legacy function,
+    to be removed in future version of cobrapy in favor of gapfill. """
+
+    warn('use gapfill instead', DeprecationWarning)
+    if 'solver' in dict(**solver_parameters):
+        raise ValueError('growMatch implementation for cobra legacy solvers'
+                         ' is defunct. Choose optlang solver with '
+                         'model.solver = "solver"')
+    return gapfill(model, universal=Universal,
+                   iterations=iterations, penalties=penalties,
+                   demand_reactions=dm_rxns, exchange_reactions=ex_rxns)
 
 
 def SMILEY(model, metabolite_id, Universal,
            dm_rxns=False, ex_rxns=False, penalties=None, **solver_parameters):
-    """
-    runs the SMILEY algorithm to determine which gaps should be
-    filled in order for the model to create the metabolite with the
-    given metabolite_id.
-
-    This function is good for running the algorithm once. For more fine-
-    grained control, create a SUXModelMILP object, add a demand reaction
-    for the given metabolite_id, and call the solve function on the
-    SUXModelMILP object.
-    """
-    SUX = SUXModelMILP(model, Universal, dm_rxns=dm_rxns, ex_rxns=ex_rxns,
-                       penalties=penalties)
-    # change the objective to be the metabolite
-    for reaction in SUX.original_reactions:
-        reaction.objective_coefficient = 0
-    demand_name = "SMILEY_DM_" + metabolite_id
-    if demand_name not in SUX.reactions:
-        demand_reaction = Reaction(demand_name)
-        demand_reaction.add_metabolites(
-            {SUX.metabolites.get_by_id(metabolite_id): -1})
-        SUX.add_reaction(demand_reaction)
-    else:
-        demand_reaction = SUX.reactions.get_by_id(demand_name)
-    demand_reaction.lower_bound = SUX.threshold
-    return SUX.solve(**solver_parameters)
+    """runs the SMILEY algorithm. Legacy function,
+    to be removed in future version of cobrapy in favor of gapfill. """
+
+    warn('use gapfill instead', DeprecationWarning)
+    if 'solver' in dict(**solver_parameters):
+        raise ValueError('SMILEY implementation for cobra legacy solvers'
+                         ' is defunct. Choose optlang solver with '
+                         'model.solver = "solver"')
+    with model:
+        metabolite = model.metabolites.get_by_id(metabolite_id)
+        model.objective = model.add_boundary(metabolite, type='demand')
+        return gapfill(model, universal=Universal, penalties=penalties,
+                       demand_reactions=dm_rxns, exchange_reactions=ex_rxns)
diff --git a/cobra/flux_analysis/loopless.py b/cobra/flux_analysis/loopless.py
index 97923b7..c7af942 100644
--- a/cobra/flux_analysis/loopless.py
+++ b/cobra/flux_analysis/loopless.py
@@ -1,10 +1,256 @@
-from ..core import Reaction, Metabolite
-from ..manipulation.modify import convert_to_irreversible
+# -*- coding: utf-8 -*-
+
+"""Provides functions to remove thermodynamically infeasible loops."""
+
+from __future__ import absolute_import
+
+import numpy
 from six import iteritems
+from sympy.core.singleton import S
+
+from cobra.core import Metabolite, Reaction, get_solution
+from cobra.util import (linear_reaction_coefficients,
+                        create_stoichiometric_matrix, nullspace)
+from cobra.manipulation.modify import convert_to_irreversible
+
+
+def add_loopless(model, zero_cutoff=1e-12):
+    """Modify a model so all feasible flux distributions are loopless.
+
+    In most cases you probably want to use the much faster `loopless_solution`.
+    May be used in cases where you want to add complex constraints and
+    objectives (for instance quadratic objectives) to the model afterwards
+    or use an approximation of Gibbs free energy directions in your model.
+    Adds variables and constraints to a model which will disallow flux
+    distributions with loops. The used formulation is described in [1]_.
+    This function *will* modify your model.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to which to add the constraints.
+    zero_cutoff : positive float, optional
+        Cutoff used for null space. Coefficients with an absolute value smaller
+        than `zero_cutoff` are considered to be zero.
+
+    Returns
+    -------
+    Nothing
+
+    References
+    ----------
+    .. [1] Elimination of thermodynamically infeasible loops in steady-state
+       metabolic models. Schellenberger J, Lewis NE, Palsson BO. Biophys J.
+       2011 Feb 2;100(3):544-53. doi: 10.1016/j.bpj.2010.12.3707. Erratum
+       in: Biophys J. 2011 Mar 2;100(5):1381.
+    """
+    internal = [i for i, r in enumerate(model.reactions) if not r.boundary]
+    s_int = create_stoichiometric_matrix(model)[:, numpy.array(internal)]
+    n_int = nullspace(s_int).T
+    max_bound = max(max(abs(b) for b in r.bounds) for r in model.reactions)
+    prob = model.problem
+
+    # Add indicator variables and new constraints
+    to_add = []
+    for i in internal:
+        rxn = model.reactions[i]
+        # indicator variable a_i
+        indicator = prob.Variable("indicator_" + rxn.id, type="binary")
+        # -M*(1 - a_i) <= v_i <= M*a_i
+        on_off_constraint = prob.Constraint(
+            rxn.flux_expression - max_bound * indicator,
+            lb=-max_bound, ub=0, name="on_off_" + rxn.id)
+        # -(max_bound + 1) * a_i + 1 <= G_i <= -(max_bound + 1) * a_i + 1000
+        delta_g = prob.Variable("delta_g_" + rxn.id)
+        delta_g_range = prob.Constraint(
+            delta_g + (max_bound + 1) * indicator,
+            lb=1, ub=max_bound, name="delta_g_range_" + rxn.id)
+        to_add.extend([indicator, on_off_constraint, delta_g, delta_g_range])
+
+    model.add_cons_vars(to_add)
+
+    # Add nullspace constraints for G_i
+    for i, row in enumerate(n_int):
+        name = "nullspace_constraint_" + str(i)
+        nullspace_constraint = prob.Constraint(S.Zero, lb=0, ub=0, name=name)
+        model.add_cons_vars([nullspace_constraint])
+        coefs = {model.variables[
+                 "delta_g_" + model.reactions[ridx].id]: row[i]
+                 for i, ridx in enumerate(internal) if
+                 abs(row[i]) > zero_cutoff}
+        model.constraints[name].set_linear_coefficients(coefs)
+
+
+def _add_cycle_free(model, fluxes):
+    """Add constraints for CycleFreeFlux."""
+    model.objective = S.Zero
+    for rxn in model.reactions:
+        flux = fluxes[rxn.id]
+        if rxn.boundary:
+            rxn.bounds = (flux, flux)
+            continue
+        if flux >= 0:
+            rxn.lower_bound = max(0, rxn.lower_bound)
+            model.objective.set_linear_coefficients(
+                {rxn.forward_variable: 1, rxn.reverse_variable: -1})
+        else:
+            rxn.upper_bound = min(0, rxn.upper_bound)
+            model.objective.set_linear_coefficients(
+                {rxn.forward_variable: -1, rxn.reverse_variable: 1})
+
+    model.solver.objective.direction = "min"
+
+
+def loopless_solution(model, fluxes=None):
+    """Convert an existing solution to a loopless one.
+
+    Removes as many loops as possible (see Notes).
+    Uses the method from CycleFreeFlux [1]_ and is much faster than
+    `add_loopless` and should therefore be the preferred option to get loopless
+    flux distributions.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to which to add the constraints.
+    fluxes : dict
+        A dictionary {rxn_id: flux} that assigns a flux to each reaction. If
+        not None will use the provided flux values to obtain a close loopless
+        solution.
+        Note that this requires a linear objective function involving
+        only the model reactions. This is the case if
+        `linear_reaction_coefficients(model)` is a correct representation of
+        the objective.
+
+    Returns
+    -------
+    cobra.Solution
+        A solution object containing the fluxes with the least amount of
+        loops possible or None if the optimization failed (usually happening
+        if the flux distribution in `fluxes` is infeasible).
+
+    Notes
+    -----
+
+    The returned flux solution has the following properties:
+
+    - it contains the minimal number of loops possible and no loops at all if
+      all flux bounds include zero
+    - it has the same exact objective value as the previous solution
+    - it has the same exact exchange fluxes as the previous solution
+    - all fluxes have the same sign (flow in the same direction) as the
+      previous solution
+
+    References
+    ----------
+    .. [1] CycleFreeFlux: efficient removal of thermodynamically infeasible
+       loops from flux distributions. Desouki AA, Jarre F, Gelius-Dietrich
+       G, Lercher MJ. Bioinformatics. 2015 Jul 1;31(13):2159-65. doi:
+       10.1093/bioinformatics/btv096.
+    """
+    # Need to reoptimize otherwise spurious solution artifacts can cause
+    # all kinds of havoc
+    # TODO: check solution status
+    if fluxes is None:
+        sol = model.optimize(objective_sense=None)
+        fluxes = sol.fluxes
+        obj_val = sol.objective_value
+    else:
+        coefs = linear_reaction_coefficients(model)
+        obj_val = sum(coefs[rxn] * fluxes[rxn.id] for rxn in coefs)
+
+    with model:
+        prob = model.problem
+        loopless_obj_constraint = prob.Constraint(
+            model.objective.expression,
+            lb=obj_val, ub=obj_val, name="loopless_obj_constraint")
+        model.add_cons_vars([loopless_obj_constraint])
+        _add_cycle_free(model, fluxes)
+        solution = model.optimize(objective_sense=None)
+
+    return solution
+
+
+def loopless_fva_iter(model, reaction, solution=False, zero_cutoff=1e-6):
+    """Plugin to get a loopless FVA solution from single FVA iteration.
+
+    Assumes the following about `model` and `reaction`:
+    1. the model objective is set to be `reaction`
+    2. the model has been optimized and contains the minimum/maximum flux for
+       `reaction`
+    3. the model contains an auxiliary variable called "fva_old_objective"
+       denoting the previous objective
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to be used.
+    reaction : cobra.Reaction
+        The reaction currently minimized/maximized.
+    solution : boolean, optional
+        Whether to return the entire solution or only the minimum/maximum for
+        `reaction`.
+    zero_cutoff : positive float, optional
+        Cutoff used for loop removal. Fluxes with an absolute value smaller
+        than `zero_cutoff` are considered to be zero.
+
+    Returns
+    -------
+    single float or dict
+        Returns the minimized/maximized flux through `reaction` if
+        `solution` is False (default). Otherwise returns a loopless flux
+        solution containing the minimum/maximum flux for `reaction`.
+    """
+    current = model.objective.value
+    sol = get_solution(model)
+
+    # boundary reactions can not be part of cycles
+    if reaction.boundary:
+        if solution:
+            return sol
+        else:
+            return current
+
+    with model:
+        model.objective = 1.0 * model.variables.fva_old_objective
+        _add_cycle_free(model, sol.fluxes)
+        model.slim_optimize()
+        flux = reaction.flux
+
+        # If the previous optimum is maintained in the loopless solution it was
+        # loopless and we are done
+        if abs(flux - current) < zero_cutoff:
+            if solution:
+                return sol
+            return current
+
+        # If previous optimum was not in the loopless solution create a new
+        # almost loopless solution containing only loops including the current
+        # reaction. Then remove all of those loops.
+        ll_sol = get_solution(model).fluxes
+        bounds = reaction.bounds
+        reaction.bounds = (current, current)
+        model.slim_optimize()
+        almost_ll_sol = get_solution(model).fluxes
+        reaction.bounds = bounds
+        # find the reactions with loops using the current reaction and remove
+        # the loops
+        for rxn in model.reactions:
+            rid = rxn.id
+            if ((abs(ll_sol[rid]) < zero_cutoff) and
+                    (abs(almost_ll_sol[rid]) > zero_cutoff)):
+                rxn.bounds = max(0, rxn.lower_bound), min(0, rxn.upper_bound)
+
+        if solution:
+            best = model.optimize(objective_sense=None)
+        else:
+            model.slim_optimize()
+            best = reaction.flux
+    return best
 
 
 def construct_loopless_model(cobra_model):
-    """construct a loopless model
+    """Construct a loopless model.
 
     This adds MILP constraints to prevent flux from proceeding in a loop, as
     done in http://dx.doi.org/10.1016/j.bpj.2010.12.3707
diff --git a/cobra/flux_analysis/moma.py b/cobra/flux_analysis/moma.py
index a02b905..6b47dad 100644
--- a/cobra/flux_analysis/moma.py
+++ b/cobra/flux_analysis/moma.py
@@ -1,24 +1,117 @@
-from scipy.sparse import dok_matrix
+# -*- coding: utf-8 -*-
+
+"""Contains functions to run minimization of metabolic adjustment (MOMA)."""
 
-from ..solvers import get_solver_name, solver_dict
+from __future__ import absolute_import
+
+from scipy.sparse import dok_matrix
+from sympy.core.singleton import S
+
+import cobra.util.solver as sutil
+from cobra.solvers import get_solver_name, solver_dict
+
+
+def add_moma(model, solution=None, linear=False):
+    r"""Add constraints and objective representing MOMA.
+
+    This adds variables and constraints for the minimization of metabolic
+    adjustment (MOMA) to the model.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to add MOMA constraints and objective to.
+    solution : cobra.Solution
+        A previous solution to use as a reference.
+    linear : bool
+        Whether to use the linear MOMA formulation or not.
+
+    Returns
+    -------
+    Nothing.
+
+    Notes
+    -----
+    In the original MOMA specification one looks for the flux distribution
+    of the deletion (v^d) closest to the fluxes without the deletion (v).
+    In math this means:
+
+    minimize \sum_i (v^d_i - v_i)^2
+    s.t. Sv^d = 0
+         lb_i <= v^d_i <= ub_i
+
+    Here, we use a variable transformation v^t := v^d_i - v_i. Substituting
+    and using the fact that Sv = 0 gives:
+
+    minimize \sum_i (v^t_i)^2
+    s.t. Sv^d = 0
+         v^t = v^d_i - v_i
+         lb_i <= v^d_i <= ub_i
+
+    So basically we just re-center the flux space at the old solution and then
+    find the flux distribution closest to the new zero (center). This is the
+    same strategy as used in cameo.
+
+    In the case of linear MOMA, we instead minimize \sum_i abs(v^t_i). The
+    linear MOMA is typically significantly faster. Also quadratic MOMA tends
+    to give flux distributions in which all fluxes deviate from the reference
+    fluxes a little bit whereas linear MOMA tends to give flux distributions
+    where the majority of fluxes are the same as the reference with few fluxes
+    deviating a lot (typical effect of L2 norm vs L1 norm).
+
+    The former objective function is saved in the optlang solver interface as
+    "moma_old_objective" and this can be used to immediately extract the value
+    of the former objective after MOMA optimization.
+    """
+    if 'moma_old_objective' in model.solver.variables:
+        raise ValueError('model is already adjusted for MOMA')
+
+    # Fall back to default QP solver if current one has no QP capability
+    if not linear:
+        model.solver = sutil.choose_solver(model, qp=True)[1]
+
+    if solution is None:
+        solution = model.optimize()
+    prob = model.problem
+    v = prob.Variable("moma_old_objective")
+    c = prob.Constraint(model.solver.objective.expression - v,
+                        lb=0.0, ub=0.0, name="moma_old_objective_constraint")
+    to_add = [v, c]
+    new_obj = S.Zero
+    for r in model.reactions:
+        flux = solution.fluxes[r.id]
+        if linear:
+            components = sutil.add_absolute_expression(
+                model, r.flux_expression, name="moma_dist_" + r.id,
+                difference=flux, add=False)
+            to_add.extend(components)
+            new_obj += components.variable
+        else:
+            dist = prob.Variable("moma_dist_" + r.id)
+            const = prob.Constraint(r.flux_expression - dist, lb=flux, ub=flux,
+                                    name="moma_constraint_" + r.id)
+            to_add.extend([dist, const])
+            new_obj += dist**2
+    model.add_cons_vars(to_add)
+    model.objective = prob.Objective(new_obj, direction='min')
 
 
 def create_euclidian_moma_model(cobra_model, wt_model=None, **solver_args):
+    """Create a new moma model (legacy function)."""
     # make the wild type copy if none was supplied
     if wt_model is None:
         wt_model = cobra_model.copy()
     else:
         wt_model = wt_model.copy()
         # ensure single objective
-        wt_obj = wt_model.reactions.query(lambda x: x > 0,
-                                          "objective_coefficient")
+        wt_obj = sutil.linear_reaction_coefficients(wt_model)
         if len(wt_obj) != 1:
             raise ValueError("wt_model must have exactly 1 objective, %d found"
                              % len(wt_obj))
 
-    obj = cobra_model.reactions.query(lambda x: x > 0, "objective_coefficient")
+    obj = sutil.linear_reaction_coefficients(cobra_model)
     if len(obj) == 1:
-        objective_id = obj[0].id
+        objective_id = list(obj)[0].id
     else:
         raise ValueError("model must have exactly 1 objective, %d found" %
                          len(obj))
@@ -44,7 +137,7 @@ def create_euclidian_moma_model(cobra_model, wt_model=None, **solver_args):
 
 
 def create_euclidian_distance_objective(n_moma_reactions):
-    """returns a matrix which will minimze the euclidian distance
+    """Return a matrix which will minimize the euclidian distance (legacy).
 
     This matrix has the structure
     [ I  -I]
@@ -52,9 +145,17 @@ def create_euclidian_distance_objective(n_moma_reactions):
     where I is the identity matrix the same size as the number of
     reactions in the original model.
 
+    Parameters
+    ----------
     n_moma_reactions: int
         This is the number of reactions in the MOMA model, which should
-        be twice the number of reactions in the original model"""
+        be twice the number of reactions in the original model
+
+    Returns
+    -------
+    scipy.sparse.dok_matrix
+        A matrix describing the distance objective.
+    """
     if n_moma_reactions % 2 != 0:
         raise ValueError("must be even")
     n_reactions = n_moma_reactions // 2
@@ -68,6 +169,7 @@ def create_euclidian_distance_objective(n_moma_reactions):
 
 
 def create_euclidian_distance_lp(moma_model, solver):
+    """Create the distance linear program (legacy method)."""
     Q = create_euclidian_distance_objective(len(moma_model.reactions))
     lp = solver.create_problem(moma_model, objective_sense="minimize",
                                quadratic_component=Q)
@@ -75,6 +177,7 @@ def create_euclidian_distance_lp(moma_model, solver):
 
 
 def solve_moma_model(moma_model, objective_id, solver=None, **solver_args):
+    """Solve the MOMA LP (legacy method)."""
     solver = solver_dict[solver if solver and isinstance(solver, str)
                          else get_solver_name(qp=True)]
     lp = create_euclidian_distance_lp(moma_model, solver=solver)
@@ -82,11 +185,11 @@ def solve_moma_model(moma_model, objective_id, solver=None, **solver_args):
     solution = solver.format_solution(lp, moma_model)
     solution.f = 0. if solution.x_dict is None \
         else solution.x_dict[objective_id]
-    moma_model.solution = solution
     return solution
 
 
 def moma(wt_model, mutant_model, solver=None, **solver_args):
+    """Run MOMA on models (legacy method)."""
     if "norm_type" in solver_args:
         print("only euclidian norm type supported for moma")
         solver_args.pop("norm_type")
@@ -97,7 +200,7 @@ def moma(wt_model, mutant_model, solver=None, **solver_args):
 
 
 def moma_knockout(moma_model, moma_objective, reaction_indexes, **moma_args):
-    """computes result of reaction_knockouts using moma"""
+    """Compute result of reaction_knockouts using moma."""
     n = len(moma_model.reactions) // 2
     # knock out the reaction
     for i in reaction_indexes:
diff --git a/cobra/flux_analysis/parsimonious.py b/cobra/flux_analysis/parsimonious.py
index 44546ea..85ac3ee 100644
--- a/cobra/flux_analysis/parsimonious.py
+++ b/cobra/flux_analysis/parsimonious.py
@@ -1,86 +1,248 @@
-from six import iteritems
-
-from ..manipulation.modify import convert_to_irreversible, revert_to_reversible
-from ..solvers import solver_dict, get_solver_name
-
-
-def optimize_minimal_flux(cobra_model, already_irreversible=False,
-                          fraction_of_optimum=1.0, solver=None,
-                          desired_objective_value=None, **optimize_kwargs):
-    """Perform basic pFBA (parsimonius FBA) and minimize total flux.
-
-    The function attempts to act as a drop-in replacement for optimize. It
-    will make the reaction reversible and perform an optimization, then
-    force the objective value to remain the same and minimize the total
-    flux. Finally, it will convert the reaction back to the irreversible
-    form it was in before. See http://dx.doi.org/10.1038/msb.2010.47
-
-    cobra_model : :class:`~cobra.core.Model` object
-
-    already_irreversible : bool, optional
-        By default, the model is converted to an irreversible one.
-        However, if the model is already irreversible, this step can be
-        skipped
-
-    fraction_of_optimum : float, optional
-        Fraction of optimum which must be maintained. The original objective
-        reaction is constrained to be greater than maximal_value *
-        fraction_of_optimum. By default, this option is specified to be 1.0
-
-    desired_objective_value : float, optional
-        A desired objective value for the minimal solution that bypasses the
-        initial optimization result.
-
-    solver : string of solver name
-        If None is given, the default solver will be used.
-
-    Updates everything in-place, returns model to original state at end.
-    """
-
-    if len(cobra_model.objective) > 1:
-        raise ValueError('optimize_minimal_flux only supports models with'
-                         ' a single objective function')
-
-    if 'objective_sense' in optimize_kwargs:
-        if optimize_kwargs['objective_sense'] == 'minimize':
-            raise ValueError(
-                'Minimization not supported in optimize_minimal_flux')
-        optimize_kwargs.pop('objective_sense', None)
-
-    if not already_irreversible:
-        convert_to_irreversible(cobra_model)
-
-    solver = solver_dict[get_solver_name() if solver is None else solver]
-    lp = solver.create_problem(cobra_model, **optimize_kwargs)
-    if not desired_objective_value:
-        solver.solve_problem(lp, objective_sense='maximize')
-        status = solver.get_status(lp)
-        if status != "optimal":
-            revert_to_reversible(cobra_model)
-            raise ValueError(
-                "pFBA requires optimal solution status, not {}".format(status))
-        desired_objective_value = solver.get_objective_value(lp)
-
-    for i, reaction in enumerate(cobra_model.reactions):
-
-        if reaction.objective_coefficient != 0:
-            # Enforce a certain fraction of the original objective
-            target = (desired_objective_value * fraction_of_optimum /
-                      reaction.objective_coefficient)
-            solver.change_variable_bounds(lp, i, target, reaction.upper_bound)
-
-        # Minimize all reaction fluxes (including objective?)
-        solver.change_variable_objective(lp, i, 1)
-
-    solver.solve_problem(lp, objective_sense='minimize', **optimize_kwargs)
-    solution = solver.format_solution(lp, cobra_model)
-
-    # Return the model to its original state
-    cobra_model.solution = solution
-    revert_to_reversible(cobra_model)
-
-    if solution.status == "optimal":
-        cobra_model.solution.f = sum([coeff * reaction.x for reaction, coeff in
-                                      iteritems(cobra_model.objective)])
-
-    return solution
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import logging
+from warnings import warn
+from itertools import chain
+
+import sympy
+
+from cobra.util import solver as sutil
+from cobra.manipulation.modify import (
+    convert_to_irreversible, revert_to_reversible)
+from cobra.util import linear_reaction_coefficients, set_objective
+from cobra.core.solution import get_solution
+
+add = sympy.Add._from_args
+mul = sympy.Mul._from_args
+LOGGER = logging.getLogger(__name__)
+
+
+def optimize_minimal_flux(*args, **kwargs):
+    warn("optimize_minimal_flux has been renamed to pfba", DeprecationWarning)
+    return pfba(*args, **kwargs)
+
+
+def pfba(model, already_irreversible=False,
+         fraction_of_optimum=1.0, solver=None,
+         desired_objective_value=None, objective=None,
+         reactions=None, **optimize_kwargs):
+    """Perform basic pFBA (parsimonious Enzyme Usage Flux Balance Analysis)
+    to minimize total flux.
+
+    pFBA [1] adds the minimization of all fluxes to the objective of the
+    model. This approach is motivated by the idea that high fluxes have a
+    higher enzyme turn-over and that since producing enzymes is costly,
+    the cell will try to minimize overall flux while still maximizing the
+    original objective function, e.g. the growth rate.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model
+    already_irreversible : bool, optional
+        By default, the model is converted to an irreversible one.
+        However, if the model is already irreversible, this step can be
+        skipped. Ignored for optlang solvers as not relevant.
+    fraction_of_optimum : float, optional
+        Fraction of optimum which must be maintained. The original objective
+        reaction is constrained to be greater than maximal_value *
+        fraction_of_optimum.
+    solver : str, optional
+        Name of the solver to be used. If None it will respect the solver set
+        in the model (model.solver).
+    desired_objective_value : float, optional
+        A desired objective value for the minimal solution that bypasses the
+        initial optimization result. Ignored for optlang solvers, instead,
+        define your objective separately and pass using the `objective`
+        argument.
+    objective : dict or model.problem.Objective
+        A desired objective to use during optimization in addition to the
+        pFBA objective. Dictionaries (reaction as key, coefficient as value)
+        can be used for linear objectives. Not used for non-optlang solvers.
+    reactions : iterable
+        List of reactions or reaction identifiers. Implies `return_frame` to
+        be true. Only return fluxes for the given reactions. Faster than
+        fetching all fluxes if only a few are needed. Only supported for
+        optlang solvers.
+    **optimize_kwargs : additional arguments for legacy solver, optional
+        Additional arguments passed to the legacy solver. Ignored for
+        optlang solver (those can be configured using
+        model.solver.configuration).
+
+    Returns
+    -------
+    cobra.Solution
+        The solution object to the optimized model with pFBA constraints added.
+
+    References
+    ----------
+    .. [1] Lewis, N. E., Hixson, K. K., Conrad, T. M., Lerman, J. A.,
+       Charusanti, P., Polpitiya, A. D., Palsson, B. O. (2010). Omic data
+       from evolved E. coli are consistent with computed optimal growth from
+       genome-scale models. Molecular Systems Biology, 6,
+       390. doi:10.1038/msb.2010.47
+
+    """
+    legacy, solver = sutil.choose_solver(model, solver)
+    if legacy:
+        return _pfba_legacy(
+            model, already_irreversible=already_irreversible,
+            fraction_of_optimum=fraction_of_optimum, solver=solver,
+            desired_objective_value=desired_objective_value,
+            **optimize_kwargs)
+    else:
+        model.solver = solver
+        return _pfba_optlang(
+            model, objective=objective,
+            fraction_of_optimum=fraction_of_optimum, reactions=reactions)
+
+
+def add_pfba(model, objective=None, fraction_of_optimum=1.0):
+    """Add pFBA objective
+
+    Add objective to minimize the summed flux of all reactions to the
+    current objective.
+
+    See Also
+    -------
+    pfba
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to add the objective to
+    objective :
+        An objective to set in combination with the pFBA objective.
+    fraction_of_optimum : float
+        Fraction of optimum which must be maintained. The original objective
+        reaction is constrained to be greater than maximal_value *
+        fraction_of_optimum.
+    """
+    if objective is not None:
+        model.objective = objective
+    if model.solver.objective.name == '_pfba_objective':
+        raise ValueError('model already has pfba objective')
+    sutil.fix_objective_as_constraint(model, fraction=fraction_of_optimum)
+    reaction_variables = ((rxn.forward_variable, rxn.reverse_variable)
+                          for rxn in model.reactions)
+    variables = chain(*reaction_variables)
+    pfba_objective = model.problem.Objective(add(
+        [mul((sympy.singleton.S.One, variable))
+         for variable in variables]), direction='min', sloppy=True,
+        name="_pfba_objective")
+    set_objective(model, pfba_objective)
+
+
+def _pfba_optlang(model, objective=None, reactions=None,
+                  fraction_of_optimum=1.0):
+    """Helper function to perform pFBA with the optlang interface
+
+    Not meant to be used directly.
+
+    Parameters
+    ----------
+    model : a cobra model
+        The model to perform pFBA on
+    objective :
+        An objective to use in addition to the pFBA constraints.
+    reactions : iterable
+        List of reactions or reaction identifiers.
+
+    Returns
+    -------
+    cobra.Solution
+        The solution to the pFBA optimization.
+
+    Updates everything in-place, returns model to original state at end.
+    """
+    reactions = model.reactions if reactions is None \
+        else model.reactions.get_by_any(reactions)
+    with model as m:
+        add_pfba(m, objective=objective,
+                 fraction_of_optimum=fraction_of_optimum)
+        m.slim_optimize(error_value=None)
+        solution = get_solution(m, reactions=reactions)
+    return solution
+
+
+def _pfba_legacy(model, solver, already_irreversible=False,
+                 fraction_of_optimum=1.0,
+                 desired_objective_value=None,
+                 **optimize_kwargs):
+    """Perform basic pFBA (parsimonious FBA) and minimize total flux.
+
+    The function attempts to act as a drop-in replacement for optimize. It
+    will make the reaction reversible and perform an optimization, then
+    force the objective value to remain the same and minimize the total
+    flux. Finally, it will convert the reaction back to the irreversible
+    form it was in before. See http://dx.doi.org/10.1038/msb.2010.47
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model
+    solver : solver
+        The solver object to use
+    already_irreversible : bool, optional
+        By default, the model is converted to an irreversible one.
+        However, if the model is already irreversible, this step can be
+        skipped
+    fraction_of_optimum : float, optional
+        Fraction of optimum which must be maintained. The original objective
+        reaction is constrained to be greater than maximal_value *
+        fraction_of_optimum. By default, this option is specified to be 1.0
+    desired_objective_value : float, optional
+        A desired objective value for the minimal solution that bypasses the
+        initial optimization result.
+
+    Updates everything in-place, returns model to original state at end.
+    """
+    objective_reactions = linear_reaction_coefficients(model)
+    if len(objective_reactions) > 1:
+        raise ValueError('pfba only supports models with'
+                         ' a single objective function')
+
+    if 'objective_sense' in optimize_kwargs:
+        if optimize_kwargs['objective_sense'] == 'minimize':
+            raise ValueError(
+                'Minimization not supported in pfba')
+        optimize_kwargs.pop('objective_sense', None)
+
+    if not already_irreversible:
+        convert_to_irreversible(model)
+
+    lp = solver.create_problem(model, **optimize_kwargs)
+    if not desired_objective_value:
+        solver.solve_problem(lp, objective_sense='maximize')
+        status = solver.get_status(lp)
+        if status != "optimal":
+            revert_to_reversible(model)
+            raise ValueError(
+                "pFBA requires optimal solution status, not {}".format(status))
+        desired_objective_value = solver.get_objective_value(lp)
+
+    for i, reaction in enumerate(model.reactions):
+
+        if reaction.objective_coefficient != 0:
+            # Enforce a certain fraction of the original objective
+            target = (desired_objective_value * fraction_of_optimum /
+                      reaction.objective_coefficient)
+            solver.change_variable_bounds(lp, i, target, reaction.upper_bound)
+
+        # Minimize all reaction fluxes (including objective?)
+        solver.change_variable_objective(lp, i, 1)
+
+    solver.solve_problem(lp, objective_sense='minimize', **optimize_kwargs)
+    solution = solver.format_solution(lp, model)
+
+    # Return the model to its original state
+    #    model.solution = solution
+    revert_to_reversible(model)
+
+    #    if solution.status == "optimal":
+    #        model.solution.f = sum([coeff * reaction.x for reaction, coeff in
+    #                                iteritems(objective_reactions)])
+
+    return solution
diff --git a/cobra/flux_analysis/phenotype_phase_plane.py b/cobra/flux_analysis/phenotype_phase_plane.py
index 37b3862..72d36ac 100644
--- a/cobra/flux_analysis/phenotype_phase_plane.py
+++ b/cobra/flux_analysis/phenotype_phase_plane.py
@@ -1,20 +1,36 @@
-from numpy import (linspace, zeros, meshgrid, abs, empty, arange, int32,
-                   unravel_index, dtype)
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, division
+
+from warnings import warn
+from collections import defaultdict
+from operator import itemgetter
+from itertools import product
+from six import iteritems
 from multiprocessing import Pool
+import pandas as pd
+from optlang.interface import OPTIMAL
 
-from ..solvers import solver_dict, get_solver_name
+from numpy import (
+    nan, abs, arange, dtype, empty, int32, linspace, meshgrid, unravel_index,
+    zeros, array)
+
+from cobra.solvers import get_solver_name, solver_dict
+import cobra.util.solver as sutil
+from cobra.flux_analysis import flux_variability_analysis as fva
+from cobra.exceptions import OptimizationError
 
 # attempt to import plotting libraries
 try:
     from matplotlib import pyplot
     from mpl_toolkits.mplot3d import axes3d
-except ImportError:
+except (ImportError, RuntimeError):
     pyplot = None
     axes3d = None
 mlab = None  # mayavi may crash python
 try:  # for prettier colors
     from palettable.colorbrewer import get_map
-except ImportError:
+except (ImportError, RuntimeError):
     try:
         from brewer2mpl import get_map
     except ImportError:
@@ -238,6 +254,8 @@ def calculate_phenotype_phase_plane(
     >>> ppp = calculate_phenotype_phase_plane(model, "EX_glc__D_e", "EX_o2_e")
     >>> ppp.plot()
     """
+    warn('calculate_phenotype_phase_plane is deprecated, consider using '
+         'production_envelope instead', DeprecationWarning)
     if solver is None:
         solver = get_solver_name()
     data = phenotypePhasePlaneData(
@@ -287,3 +305,237 @@ def calculate_phenotype_phase_plane(
             data.shadow_prices2[i, j] = result[4]
     data.segment()
     return data
+
+
+def production_envelope(model, reactions, objective=None, c_source=None,
+                        points=20, solver=None):
+    """Calculate the objective value conditioned on all combinations of
+    fluxes for a set of chosen reactions
+
+    The production envelope can be used to analyze a model's ability to
+    produce a given compound conditional on the fluxes for another set of
+    reactions, such as the uptake rates. The model is alternately optimized
+    with respect to minimizing and maximizing the objective and the
+    obtained fluxes are recorded. Ranges to compute production are set to the effective
+    bounds, i.e. the minimum / maximum fluxes that can be obtained given
+    current reaction bounds.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to compute the production envelope for.
+    reactions : list or string
+        A list of reactions, reaction identifiers or single reaction
+    objective : string, dict, model.solver.interface.Objective
+        The objective (reaction) to use for the production envelope. Uses the
+        model's current objective if left missing.
+    c_source : cobra.Reaction or string
+       A reaction or reaction identifier that is the source of carbon for
+       computing carbon (mol carbon in output over mol carbon in input) and
+       mass yield (gram product over gram output). Only objectives with a
+       carbon-containing input and output metabolite are supported.
+    points : int
+       The number of points to calculate production for.
+    solver : string
+       The solver to use - only here for consistency with older
+       implementations (this argument will be removed in the future). The
+       solver should be set using `model.solver` directly. Only optlang
+       based solvers are supported.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A data frame with one row per evaluated point and
+
+        - reaction id : one column per input reaction indicating the flux at
+          each given point,
+        - flux: the objective flux
+
+        - carbon_yield: if carbon source is defined and the product is a
+          single metabolite (mol carbon product per mol carbon feeding source)
+
+        - mass_yield: if carbon source is defined and the product is a
+          single metabolite (gram product per 1 g of feeding source)
+
+        - direction: the direction of the optimization.
+
+        Only points that give a valid solution are returned.
+
+    Examples
+    --------
+    >>> import cobra.test
+    >>> from cobra.flux_analysis import production_envelope
+    >>> model = cobra.test.create_test_model("textbook")
+    >>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"])
+    """
+    legacy, solver = sutil.choose_solver(model, solver)
+    if legacy:
+        raise ValueError('production_envelope is only implemented for optlang '
+                         'based solver interfaces.')
+    reactions = model.reactions.get_by_any(reactions)
+    objective = model.solver.objective if objective is None else objective
+    result = None
+    with model:
+        model.objective = objective
+        if c_source is None:
+            c_input = get_c_input(model)
+        else:
+            c_input = model.reactions.get_by_any(c_source)[0]
+        objective_reactions = list(sutil.linear_reaction_coefficients(model))
+        if len(objective_reactions) != 1:
+            raise ValueError('cannot calculate yields for objectives with '
+                             'multiple reactions')
+        carbon_io = c_input, objective_reactions[0]
+        min_max = fva(model, reactions, fraction_of_optimum=0)
+        grid = [linspace(min_max.minimum[rxn.id], min_max.maximum[rxn.id],
+                         points, endpoint=True) for rxn in reactions]
+        grid_list = list(product(*grid))
+        result = envelope_for_points(model, reactions, grid_list, carbon_io)
+
+    return pd.DataFrame(result)
+
+
+def envelope_for_points(model, reactions, grid, carbon_io):
+    results = defaultdict(list)
+    for direction in ('minimum', 'maximum'):
+        sense = "min" if direction == "minimum" else "max"
+        for point in grid:
+            with model:
+                model.solver.objective.direction = sense
+                for reaction, coordinate in zip(reactions, point):
+                    reaction.bounds = coordinate, coordinate
+                model.slim_optimize()
+                if model.solver.status == OPTIMAL:
+                    for reaction, coordinate in zip(reactions, point):
+                        results[reaction.id].append(coordinate)
+                    results['direction'].append(direction)
+                    results['flux'].append(model.solver.objective.value)
+                    if carbon_io[0] is not None:
+                        results['carbon_yield'].append(carbon_yield(carbon_io))
+                        results['mass_yield'].append(mass_yield(carbon_io))
+    for key, value in results.items():
+        results[key] = array(value)
+    if carbon_io[0] is not None:
+        results['carbon_source'] = carbon_io[0].id
+    return results
+
+
+def carbon_yield(c_input_output):
+    """ mol carbon product per mol carbon input
+
+    Returns
+    -------
+    float
+        the mol carbon atoms in the product (as defined by the model
+        objective) divided by the mol carbon in the input reactions (as
+        defined by the model medium) or nan in case a division by zero
+        arises
+    """
+
+    c_input, c_output = c_input_output
+    if c_input is None:
+        return nan
+    carbon_input_flux = total_carbon_flux(c_input, consumption=True)
+    carbon_output_flux = total_carbon_flux(c_output, consumption=False)
+    try:
+        return carbon_output_flux / carbon_input_flux
+    except ZeroDivisionError:
+        return nan
+
+
+def mass_yield(c_input_output):
+    """Gram product divided by gram of carbon input source
+
+    Parameters
+    ----------
+    c_input_output : tuple
+        Two reactions, the one that feeds carbon to the system and the one
+        that produces carbon containing compound.
+
+    Returns
+    -------
+    float
+        gram product per 1 g of feeding source
+    """
+    c_input, c_output = c_input_output
+    if c_input is None:
+        return nan
+    try:
+        c_source, source_flux = single_flux(c_input, consumption=True)
+        c_product, product_flux = single_flux(c_output, consumption=False)
+    except ValueError:
+        return nan
+    mol_prod_mol_src = product_flux / source_flux
+    x = mol_prod_mol_src * c_product.formula_weight
+    return x / c_source.formula_weight
+
+
+def total_carbon_flux(reaction, consumption=True):
+    """summed product carbon flux for a reaction
+
+    Parameters
+    ----------
+    reaction : Reaction
+        the reaction to carbon return flux for
+    consumption : bool
+        flux for consumed metabolite, else produced
+
+    Returns
+    -------
+    float
+        reaction flux multiplied by number of carbon for the products of the
+        reaction
+    """
+    direction = 1 if consumption else -1
+    c_flux = [reaction.flux * coeff * met.elements.get('C', 0) * direction
+              for met, coeff in reaction.metabolites.items()]
+    return sum([flux for flux in c_flux if flux > 0])
+
+
+def single_flux(reaction, consumption=True):
+    """flux into single product for a reaction
+
+    only defined for reactions with single products
+
+    Parameters
+    ----------
+    reaction : Reaction
+        the reaction to product flux for
+    consumption : bool
+        flux for consumed metabolite, else produced
+
+    Returns
+    -------
+    tuple
+        metabolite, flux for the metabolite
+    """
+    if len(list(reaction.metabolites)) != 1:
+        raise ValueError('product flux only defined for single metabolite '
+                         'reactions')
+    met, coeff = next(iteritems(reaction.metabolites))
+    direction = 1 if consumption else -1
+    return met, reaction.flux * coeff * direction
+
+
+def get_c_input(model):
+    """ carbon source reactions
+
+    Returns
+    -------
+    Reaction
+       The medium reaction with highest input carbon flux
+    """
+    try:
+        model.slim_optimize(error_value=None)
+    except OptimizationError:
+        return None
+
+    reactions = model.reactions.get_by_any(list(model.medium))
+    reactions_fluxes = [(rxn, total_carbon_flux(rxn, consumption=True))
+                        for rxn in reactions]
+    source_reactions = [(rxn, c_flux) for rxn, c_flux
+                        in reactions_fluxes if c_flux > 0]
+    try:
+        return max(source_reactions, key=itemgetter(1))[0]
+    except ValueError:
+        return None
diff --git a/cobra/flux_analysis/reaction.py b/cobra/flux_analysis/reaction.py
index 7b31e60..659aba9 100644
--- a/cobra/flux_analysis/reaction.py
+++ b/cobra/flux_analysis/reaction.py
@@ -1,44 +1,147 @@
-# cobra.flux_analysis.reaction.py
-# functions for analyzing / creating objective functions
-from ..core.Reaction import Reaction
+# -*- coding: utf-8 -*-
+
+""" functions for analyzing / creating objective functions """
+
+from __future__ import absolute_import, division
 from six import iteritems
+from warnings import warn
+from operator import attrgetter
+
+from cobra.core import Reaction
+from cobra.util import assert_optimal, choose_solver
+from cobra.exceptions import OptimizationError
 
 
 def assess(model, reaction, flux_coefficient_cutoff=0.001, solver=None):
-    """Assesses the capacity of the model to produce the precursors for the
+    """Assesses production capacity.
+
+    Assesses the capacity of the model to produce the precursors for the
     reaction and absorb the production of the reaction while the reaction is
     operating at, or above, the specified cutoff.
 
-    model: A :class:`~cobra.core.Model` object
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to assess production capacity for
 
-    reaction: A :class:`~cobra.core.Reaction` object
+    reaction : reaction identifier or cobra.Reaction
+        The reaction to assess
 
-    flux_coefficient_cutoff:  Float.  The minimum flux that reaction must carry
-    to be considered active.
+    flux_coefficient_cutoff :  float
+        The minimum flux that reaction must carry to be considered active.
 
-    solver : String or solver name. If None, the default solver will be used.
+    solver : basestring
+        Solver name. If None, the default solver will be used.
 
-    returns: True if the model can produce the precursors and absorb the
-    products for the reaction operating at, or above, flux_coefficient_cutoff.
-    Otherwise, a dictionary of {'precursor': Status, 'product': Status}.  Where
-    Status is the results from assess_precursors and assess_products,
-    respectively.
+    Returns
+    -------
+    bool or dict
+        True if the model can produce the precursors and absorb the products
+        for the reaction operating at, or above, flux_coefficient_cutoff.
+        Otherwise, a dictionary of {'precursor': Status, 'product': Status}.
+        Where Status is the results from assess_precursors and
+        assess_products, respectively.
 
     """
-    reaction = model.reactions.get_by_id(reaction.id)
-    original_objective = model.objective
-    model.objective = reaction
-    model.optimize(solver=solver)
-    model.objective = original_objective
-    if model.solution.f >= flux_coefficient_cutoff:
-        return True
+    reaction = model.reactions.get_by_any(reaction)[0]
+    with model as m:
+        m.objective = reaction
+        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
+            return True
+        else:
+            results = dict()
+            results['precursors'] = assess_component(
+                model, reaction, 'reactants', flux_coefficient_cutoff)
+            results['products'] = assess_component(
+                model, reaction, 'products', flux_coefficient_cutoff)
+            return results
+
+
+def assess_component(model, reaction, side, flux_coefficient_cutoff=0.001,
+                     solver=None):
+    """Assesses the ability of the model to provide sufficient precursors,
+    or absorb products, for a reaction operating at, or beyond,
+    the specified cutoff.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to assess production capacity for
+
+    reaction : reaction identifier or cobra.Reaction
+        The reaction to assess
+
+    side : basestring
+        Side of the reaction, 'products' or 'reactants'
+
+    flux_coefficient_cutoff :  float
+        The minimum flux that reaction must carry to be considered active.
+
+    solver : basestring
+        Solver name. If None, the default solver will be used.
+
+    Returns
+    -------
+    bool or dict
+        True if the precursors can be simultaneously produced at the
+        specified cutoff. False, if the model has the capacity to produce
+        each individual precursor at the specified threshold  but not all
+        precursors at the required level simultaneously. Otherwise a
+        dictionary of the required and the produced fluxes for each reactant
+        that is not produced in sufficient quantities.
+
+    """
+    reaction = model.reactions.get_by_any(reaction)[0]
+    result_key = dict(reactants='produced', products='capacity')[side]
+    get_components = attrgetter(side)
+    with model as m:
+        m.objective = reaction
+        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
+            return True
+        simulation_results = {}
+        # build the demand reactions and add all at once
+        demand_reactions = {}
+        for component in get_components(reaction):
+            coeff = reaction.metabolites[component]
+            demand = m.add_boundary(component, type='demand')
+            demand.metabolites[component] = coeff
+            demand_reactions[demand] = (component, coeff)
+        # First assess whether all precursors can be produced simultaneously
+        joint_demand = Reaction("joint_demand")
+        for demand_reaction in demand_reactions:
+            joint_demand += demand_reaction
+        m.add_reactions([joint_demand])
+        m.objective = joint_demand
+        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
+            return True
+
+        # Otherwise assess the ability of the model to produce each precursor
+        # individually.  Now assess the ability of the model to produce each
+        # reactant for a reaction
+        for demand_reaction, (component, coeff) in iteritems(demand_reactions):
+            # Calculate the maximum amount of the
+            with m:
+                m.objective = demand_reaction
+                flux = _optimize_or_value(m, solver=solver)
+            # metabolite that can be produced.
+            if flux_coefficient_cutoff > flux:
+                # Scale the results to a single unit
+                simulation_results.update({
+                    component: {
+                        'required': flux_coefficient_cutoff / abs(coeff),
+                        result_key: flux / abs(coeff)
+                    }})
+        if len(simulation_results) == 0:
+            simulation_results = False
+        return simulation_results
+
+
+def _optimize_or_value(model, value=0., solver=None):
+    legacy, _ = choose_solver(model, solver=solver)
+    if legacy:
+        return model.optimize(solver=solver).f
     else:
-        results = {}
-        results['precursors'] = assess_precursors(
-            model, reaction, flux_coefficient_cutoff)
-        results['products'] = assess_products(
-            model, reaction, flux_coefficient_cutoff)
-        return results
+        return model.slim_optimize(error_value=value)
 
 
 def assess_precursors(model, reaction, flux_coefficient_cutoff=0.001,
@@ -46,155 +149,73 @@ def assess_precursors(model, reaction, flux_coefficient_cutoff=0.001,
     """Assesses the ability of the model to provide sufficient precursors for
     a reaction operating at, or beyond, the specified cutoff.
 
-    model: A :class:`~cobra.core.Model` object
+    Deprecated: use assess_component instead
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to assess production capacity for
 
-    reaction: A :class:`~cobra.core.Reaction` object
+    reaction : reaction identifier or cobra.Reaction
+        The reaction to assess
 
-    flux_coefficient_cutoff: Float. The minimum flux that reaction must carry
-    to be considered active.
+    flux_coefficient_cutoff :  float
+        The minimum flux that reaction must carry to be considered active.
 
-    solver : String or solver name. If None, the default solver will be used.
+    solver : basestring
+        Solver name. If None, the default solver will be used.
 
-    returns: True if the precursors can be simultaneously produced at the
-    specified cutoff. False, if the model has the capacity to produce each
-    individual precursor at the specified threshold  but not all precursors at
-    the required level simultaneously. Otherwise a dictionary of the required
-    and the produced fluxes for each reactant that is not produced in
-    sufficient quantities.
+    Returns
+    -------
+    bool or dict
+        True if the precursors can be simultaneously produced at the
+        specified cutoff. False, if the model has the capacity to produce
+        each individual precursor at the specified threshold  but not all
+        precursors at the required level simultaneously. Otherwise a
+        dictionary of the required and the produced fluxes for each reactant
+        that is not produced in sufficient quantities.
 
     """
-    model = model.copy()
-    reaction = model.reactions.get_by_id(reaction.id)
-    original_objective = model.objective
-    model.objective = reaction
-    model.optimize(solver=solver)
-    model.objective = original_objective
-    if model.solution.f >= flux_coefficient_cutoff:
-        return True
-    #
-    simulation_results = {}
-    # build the sink reactions and add all at once
-    sink_reactions = {}
-    for the_component in reaction.reactants:
-        # add in a sink reaction for each component
-        sink_reaction = Reaction('test_sink_%s' % the_component.id)
-        # then simulate production ability
-        # then check it can exceed objective cutoff * component stoichiometric
-        # coefficient.
-        coefficient = reaction.get_coefficient(the_component)
-        sink_reaction.add_metabolites({the_component: coefficient})
-        sink_reaction.upper_bound = 1000
-        sink_reactions[sink_reaction] = (the_component, coefficient)
-    # First assess whether all precursors can pbe produced simultaneously
-    super_sink = Reaction("super_sink")
-    for reaction in sink_reactions:
-        super_sink += reaction
-    super_sink.id = 'super_sink'
-    model.add_reactions(sink_reactions.keys() + [super_sink])
-    model.objective = super_sink
-    model.optimize(solver=solver)
-    model.objective = original_objective
-    if flux_coefficient_cutoff <= model.solution.f:
-        return True
-
-    # Otherwise assess the ability of the model to produce each precursor
-    # individually.  Now assess the ability of the model to produce each
-    # reactant for a reaction
-    for sink_reaction, (component, coefficient) in iteritems(sink_reactions):
-        # Calculate the maximum amount of the
-        model.objective = sink_reaction
-        model.optimize(solver=solver)
-        model.objective = original_objective
-        # metabolite that can be produced.
-        if flux_coefficient_cutoff > model.solution.f:
-            # Scale the results to a single unit
-            simulation_results.update({
-                component:
-                    {
-                        'required': flux_coefficient_cutoff / abs(coefficient),
-                        'produced': model.solution.f / abs(coefficient)
-                    }
-            })
-    if len(simulation_results) == 0:
-        simulation_results = False
-    return simulation_results
+    warn('use assess_component instead', DeprecationWarning)
+    return assess_component(model, reaction, 'reactants',
+                            flux_coefficient_cutoff, solver)
 
 
 def assess_products(model, reaction, flux_coefficient_cutoff=0.001,
                     solver=None):
     """Assesses whether the model has the capacity to absorb the products of
-    a reaction at a given flux rate.  Useful for identifying which components
-    might be blocking a reaction from achieving a specific flux rate.
+    a reaction at a given flux rate.
+
+    Useful for identifying which components might be blocking a reaction
+    from achieving a specific flux rate.
+
+    Deprecated: use assess_component instead
 
-    model: A :class:`~cobra.core.Model` object
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to assess production capacity for
 
-    reaction: A :class:`~cobra.core.Reaction` object
+    reaction : reaction identifier or cobra.Reaction
+        The reaction to assess
 
-    flux_coefficient_cutoff:  Float.  The minimum flux that reaction must carry
-    to be considered active.
+    flux_coefficient_cutoff :  float
+        The minimum flux that reaction must carry to be considered active.
 
-    solver : String or solver name. If None, the default solver will be used.
+    solver : basestring
+        Solver name. If None, the default solver will be used.
 
-    returns: True if the model has the capacity to absorb all the reaction
-    products being simultaneously given the specified cutoff.   False, if the
-    model has the capacity to absorb each individual product but not all
-    products at the required level simultaneously.   Otherwise a dictionary of
-    the required and the capacity fluxes for each product that is not absorbed
-    in sufficient quantities.
+    Returns
+    -------
+    bool or dict
+        True if the model has the capacity to absorb all the reaction
+        products simultaneously, given the specified cutoff. False,
+        if the model has the capacity to absorb each individual product but
+        not all products at the required level simultaneously.   Otherwise a
+        dictionary of the required and the capacity fluxes for each product
+        that is not absorbed in sufficient quantities.
 
     """
-    model = model.copy()
-    reaction = model.reactions.get_by_id(reaction.id)
-    original_objective = model.objective
-    model.objective = reaction
-    model.optimize(solver=solver)
-    model.objective = original_objective
-    if model.solution.f >= flux_coefficient_cutoff:
-        return True
-    #
-    simulation_results = {}
-    # build the sink reactions and add all at once
-    source_reactions = {}
-    for the_component in reaction.products:
-        # add in a sink reaction for each component
-        source_reaction = Reaction('test_source_%s' % the_component.id)
-        # then simulate production ability
-        # then check it can exceed objective cutoff * component stoichiometric
-        # coefficient.
-        coefficient = reaction.get_coefficient(the_component)
-        source_reaction.add_metabolites({the_component: coefficient})
-        source_reaction.upper_bound = 1000
-        source_reactions[source_reaction] = (the_component, coefficient)
-    #
-    super_source = Reaction('super_source')
-    for reaction in source_reactions:
-        super_source += reaction
-    super_source.id = 'super_source'
-    model.add_reactions(source_reactions.keys() + [super_source])
-    model.objective = super_source
-    model.optimize(solver=solver)
-    model.objective = original_objective
-    if flux_coefficient_cutoff <= model.solution.f:
-        return True
-
-    # Now assess the ability of the model to produce each reactant for a
-    # reaction
-    for source_reaction, (component, coefficient) in \
-            iteritems(source_reactions):
-        # Calculate the maximum amount of the
-        model.objective = source_reaction
-        model.optimize(solver=solver)
-        model.objective = original_objective
-        # metabolite that can be produced.
-        if flux_coefficient_cutoff > model.solution.f:
-            # Scale the results to a single unit
-            simulation_results.update({
-                component:
-                    {
-                        'required': flux_coefficient_cutoff / abs(coefficient),
-                        'capacity': model.solution.f / abs(coefficient)}
-                    }
-            )
-    if len(simulation_results) == 0:
-        simulation_results = False
-    return simulation_results
+    warn('use assess_component instead', DeprecationWarning)
+    return assess_component(model, reaction, 'products',
+                            flux_coefficient_cutoff, solver)
diff --git a/cobra/flux_analysis/sampling.py b/cobra/flux_analysis/sampling.py
new file mode 100644
index 0000000..a5fa2a2
--- /dev/null
+++ b/cobra/flux_analysis/sampling.py
@@ -0,0 +1,826 @@
+# -*- coding: utf-8 -*-
+
+"""Module implementing flux sampling for cobra models.
+
+New samplers should derive from the abstract `HRSampler` class
+where possible to provide a uniform interface.
+"""
+
+from __future__ import absolute_import, division
+
+import ctypes
+from collections import namedtuple
+from logging import getLogger
+from multiprocessing import Array, Pool
+from time import time
+
+import numpy as np
+import pandas
+from optlang.interface import OPTIMAL
+from sympy.core.singleton import S
+from cobra.util import (create_stoichiometric_matrix, constraint_matrices,
+                        nullspace)
+
+LOGGER = getLogger(__name__)
+"""The logger for the package."""
+
+bounds_tol = np.finfo(np.float32).eps
+"""The tolerance used for checking bounds feasibility."""
+
+feasibility_tol = bounds_tol
+"""The tolerance used for checking equalities feasibility."""
+
+nproj = 1000000
+"""Reproject the solution into the feasibility space every nproj iterations."""
+
+nproj_center = 10000
+"""Reproject the center into the nullspace every nproj_center iterations.
+   Only used for inhomogeneous problems."""
+
+Problem = namedtuple("Problem",
+                     ["equalities", "b", "inequalities", "bounds",
+                      "variable_fixed", "variable_bounds", "nullspace",
+                      "homogeneous"])
+"""Defines the matrix representation of a sampling problem.
+
+Attributes
+----------
+equalities : numpy.array
+    All equality constraints in the model.
+b : numpy.array
+    The right side of the equality constraints.
+inequalities : numpy.array
+    All inequality constraints in the model.
+bounds : numpy.array
+    The lower and upper bounds for the inequality constraints.
+variable_bounds : numpy.array
+    The lower and upper bounds for the variables.
+homogeneous: boolean
+    Indicates whether the sampling problem is homogeneous, i.e. whether there
+    exist no non-zero fixed variables or constraints.
+nullspace : numpy.matrix
+    A matrix containing the nullspace of the equality constraints. Each column
+    is one basis vector.
+"""
+
+
+def mp_init(obj):
+    """Initialize the multiprocessing pool."""
+    global sampler
+    sampler = obj
+
+
+def shared_np_array(shape, data=None, integer=False):
+    """Create a new numpy array that resides in shared memory.
+
+    Parameters
+    ----------
+    shape : tuple of ints
+        The shape of the new array.
+    data : numpy.array
+        Data to copy to the new array. Has to have the same shape.
+    integer : boolean
+        Whether to use an integer array. Defaults to False which means
+        float array.
+    """
+    size = np.prod(shape)
+    if integer:
+        array = Array(ctypes.c_int64, int(size))
+        np_array = np.frombuffer(array.get_obj(), dtype="int64")
+    else:
+        array = Array(ctypes.c_double, int(size))
+        np_array = np.frombuffer(array.get_obj())
+    np_array = np_array.reshape(shape)
+
+    if data is not None:
+        if len(shape) != len(data.shape):
+            raise ValueError("`data` must have the same dimensions"
+                             "as the created array.")
+        same = all(x == y for x, y in zip(shape, data.shape))
+        if not same:
+            raise ValueError("`data` must have the same shape"
+                             "as the created array.")
+        np_array[:] = data
+
+    return np_array
+
+
+# Has to be declared outside of class to be used for multiprocessing :(
+def _step(sampler, x, delta, fraction=None):
+    """Sample a new feasible point from the point `x` in direction `delta`."""
+    prob = sampler.problem
+    valid = ((np.abs(delta) > feasibility_tol) &
+             np.logical_not(prob.variable_fixed))
+    # permissible alphas for staying in variable bounds
+    valphas = ((1.0 - bounds_tol) * prob.variable_bounds - x)[:, valid]
+    valphas = (valphas / delta[valid]).flatten()
+    if prob.bounds.shape[0] > 0:
+        # permissible alphas for staying in constraint bounds
+        balphas = ((1.0 - bounds_tol) * prob.bounds -
+                   prob.inequalities.dot(x))
+        balphas = (balphas / prob.inequalities.dot(delta)).flatten()
+        # combined alphas
+        alphas = np.hstack([valphas, balphas])
+    else:
+        alphas = valphas
+    alpha_range = (alphas[alphas > 0.0].min(), alphas[alphas <= 0.0].max())
+
+    if fraction:
+        alpha = alpha_range[0] + fraction * (alpha_range[1] - alpha_range[0])
+    else:
+        alpha = np.random.uniform(alpha_range[0], alpha_range[1])
+    p = x + alpha * delta
+
+    # Numerical instabilities may cause bounds invalidation
+    # reset sampler and sample from one of the original warmup directions
+    # if that occurs
+    if np.any(sampler._bounds_dist(p) < -bounds_tol):
+        LOGGER.info("found bounds infeasibility in sample, "
+                    "resetting to center")
+        newdir = sampler.warmup[np.random.randint(sampler.n_warmup)]
+        return _step(sampler, sampler.center, newdir - sampler.center)
+    return p
+
+
+class HRSampler(object):
+    """The abstract base class for hit-and-run samplers.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model from which to generate samples.
+    thinning : int
+        The thinning factor of the generated sampling chain. A thinning of 10
+        means samples are returned every 10 steps.
+
+    Attributes
+    ----------
+    model : cobra.Model
+        The cobra model from which the samples get generated.
+    thinning : int
+        The currently used thinning factor.
+    n_samples : int
+        The total number of samples that have been generated by this
+        sampler instance.
+    problem : collections.namedtuple
+        A python object whose attributes define the entire sampling problem in
+        matrix form. See docstring of `Problem`.
+    warmup : a numpy matrix
+        A matrix of with as many columns as reactions in the model and more
+        than 3 rows containing a warmup sample in each row. None if no warmup
+        points have been generated yet.
+    seed : positive integer, optional
+        Sets the random number seed. Initialized to the current time stamp if
+        None.
+    fwd_idx : np.array
+        Has one entry for each reaction in the model containing the index of
+        the respective forward variable.
+    rev_idx : np.array
+        Has one entry for each reaction in the model containing the index of
+        the respective reverse variable.
+    """
+
+    def __init__(self, model, thinning, seed=None):
+        """Initialize a new sampler object."""
+        # This currently has to be done to reset the solver basis which is
+        # required to get deterministic warmup point generation
+        # (in turn required for a working `seed` arg)
+        if model.solver.is_integer:
+            raise TypeError("sampling does not work with integer problems :(")
+        self.model = model.copy()
+        self.thinning = thinning
+        self.n_samples = 0
+        self.problem = self.__build_problem()
+        # Set up a map from reaction -> forward/reverse variable
+        var_idx = {v: idx for idx, v in enumerate(model.variables)}
+        self.fwd_idx = np.array([var_idx[r.forward_variable]
+                                 for r in model.reactions])
+        self.rev_idx = np.array([var_idx[r.reverse_variable]
+                                 for r in model.reactions])
+        self.warmup = None
+        if seed is None:
+            self._seed = int(time())
+        else:
+            self._seed = seed
+        # Avoid overflow
+        self._seed = self._seed % np.iinfo(np.int32).max
+
+    def __build_problem(self):
+        """Build the matrix representation of the sampling problem."""
+        # Set up the mathematical problem
+        prob = constraint_matrices(self.model, zero_tol=feasibility_tol)
+        # check if there any non-zero equality constraints
+        equalities = prob.equalities
+        b = prob.b
+        bounds = np.atleast_2d(prob.bounds).T
+        var_bounds = np.atleast_2d(prob.variable_bounds).T
+        homogeneous = all(np.abs(b) < feasibility_tol)
+        fixed_non_zero = np.abs(prob.variable_bounds[:, 1]) > feasibility_tol
+        fixed_non_zero &= prob.variable_fixed
+        # check if there are any non-zero fixed variables, add them as
+        # equalities to the stoichiometric matrix
+        if any(fixed_non_zero):
+            n_fixed = fixed_non_zero.sum()
+            rows = np.zeros((n_fixed, prob.equalities.shape[1]))
+            rows[range(n_fixed), np.where(fixed_non_zero)] = 1.0
+            equalities = np.vstack([equalities, rows])
+            var_b = prob.variable_bounds[:, 1]
+            b = np.hstack([b, var_b[fixed_non_zero]])
+            homogeneous = False
+        # Set up a projection that can cast point into the nullspace
+        nulls = nullspace(equalities)
+        # convert bounds to a matrix and add variable bounds as well
+        return Problem(
+            equalities=shared_np_array(equalities.shape, equalities),
+            b=shared_np_array(b.shape, b),
+            inequalities=shared_np_array(prob.inequalities.shape,
+                                         prob.inequalities),
+            bounds=shared_np_array(bounds.shape, bounds),
+            variable_fixed=shared_np_array(prob.variable_fixed.shape,
+                                           prob.variable_fixed, integer=True),
+            variable_bounds=shared_np_array(var_bounds.shape, var_bounds),
+            nullspace=shared_np_array(nulls.shape, nulls),
+            homogeneous=homogeneous
+        )
+
+    def generate_fva_warmup(self):
+        """Generate the warmup points for the sampler.
+
+        Generates warmup points by setting each flux as the sole objective
+        and minimizing/maximizing it. Also caches the projection of the
+        warmup points into the nullspace for non-homogeneous problems (only
+        if necessary).
+        """
+        self.n_warmup = 0
+        idx = np.hstack([self.fwd_idx, self.rev_idx])
+        self.warmup = np.zeros((len(idx), len(self.model.variables)))
+        self.model.objective = S.Zero
+        self.model.objective.direction = "max"
+        variables = self.model.variables
+        for i in idx:
+            # Omit fixed reactions
+            if self.problem.variable_fixed[i]:
+                LOGGER.info("skipping fixed variable %s" %
+                            variables[i].name)
+                continue
+            self.model.objective.set_linear_coefficients({variables[i]: 1})
+            self.model.slim_optimize()
+            if not self.model.solver.status == OPTIMAL:
+                LOGGER.info("can not maximize variable %s, skipping it" %
+                            variables[i].name)
+                continue
+            primals = self.model.solver.primal_values
+            sol = [primals[v.name] for v in self.model.variables]
+            self.warmup[self.n_warmup, ] = sol
+            self.n_warmup += 1
+            # revert objective
+            self.model.objective.set_linear_coefficients({variables[i]: 0})
+        # Shrink warmup points to measure
+        self.warmup = shared_np_array((self.n_warmup, len(variables)),
+                                      self.warmup[0:self.n_warmup, ])
+
+    def _reproject(self, p):
+        """Reproject a point into the feasibility region.
+
+        This function is guaranteed to return a new feasible point. However,
+        no guarantees in terms of proximity to the original point can be made.
+
+        Parameters
+        ----------
+        p : numpy.array
+            The current sample point.
+
+        Returns
+        -------
+        numpy.array
+            A new feasible point. If `p` was feasible it will return `p`.
+        """
+        nulls = self.problem.nullspace
+        equalities = self.problem.equalities
+        # don't reproject if point is feasible
+        if np.allclose(equalities.dot(p), self.problem.b,
+                       rtol=0, atol=feasibility_tol):
+            new = p
+        else:
+            LOGGER.info("feasibility violated in sample"
+                        " %d, trying to reproject" % self.n_samples)
+            new = nulls.dot(nulls.T.dot(p))
+        # Projections may violate bounds
+        # set to random point in space in that case
+        if any(self._bounds_dist(new) < -bounds_tol):
+            LOGGER.info("reprojection failed in sample"
+                        " %d, using random point in space" % self.n_samples)
+            idx = np.random.randint(self.n_warmup,
+                                    size=min(2, int(np.sqrt(self.n_warmup))))
+            new = self.warmup[idx, :].mean(axis=0)
+        return new
+
+    def _bounds_dist(self, p):
+        """Get the lower and upper bound distances. Negative is bad."""
+        prob = self.problem
+        lb_dist = (p - prob.variable_bounds[0, ]).min()
+        ub_dist = (prob.variable_bounds[1, ] - p).min()
+        if prob.bounds.shape[0] > 0:
+            const = prob.inequalities.dot(p)
+            const_lb_dist = (const - prob.bounds[0, ]).min()
+            const_ub_dist = (prob.bounds[1, ] - const).min()
+            lb_dist = min(lb_dist, const_lb_dist)
+            ub_dist = min(ub_dist, const_ub_dist)
+        return np.array([lb_dist, ub_dist])
+
+    def sample(self, n, fluxes=True):
+        """Abstract sampling function.
+
+        Should be overwritten by child classes.
+        """
+        pass
+
+    def batch(self, batch_size, batch_num, fluxes=True):
+        """Create a batch generator.
+
+        This is useful to generate n batches of m samples each.
+
+        Parameters
+        ----------
+        batch_size : int
+            The number of samples contained in each batch (m).
+        batch_num : int
+            The number of batches in the generator (n).
+        fluxes : boolean
+            Whether to return fluxes or the internal solver variables. If set
+            to False will return a variable for each forward and backward flux
+            as well as all additional variables you might have defined in the
+            model.
+
+        Yields
+        ------
+        pandas.DataFrame
+            A DataFrame with dimensions (batch_size x n_r) containing
+            a valid flux sample for a total of n_r reactions (or variables if
+            fluxes=False) in each row.
+        """
+        for i in range(batch_num):
+            yield self.sample(batch_size, fluxes=fluxes)
+
+    def validate(self, samples):
+        """Validate a set of samples for equality and inequality feasibility.
+
+        Can be used to check whether the generated samples and warmup points
+        are feasible.
+
+        Parameters
+        ----------
+        samples : numpy.matrix
+            Must be of dimension (n_samples x n_reactions). Contains the
+            samples to be validated. Samples must be from fluxes.
+
+        Returns
+        -------
+        numpy.array
+            A one-dimensional numpy array containing, for each sample,
+            a code of 1 to 3 letters denoting the validation result:
+
+            - 'v' means feasible in bounds and equality constraints
+            - 'l' means a lower bound violation
+            - 'u' means an upper bound violation
+            - 'e' means an equality constraint violation
+        """
+        samples = np.atleast_2d(samples)
+        prob = self.problem
+
+        if samples.shape[1] == len(self.model.reactions):
+            S = create_stoichiometric_matrix(self.model)
+            b = np.array([self.model.constraints[m.id].lb for m in
+                          self.model.metabolites])
+            bounds = np.array([r.bounds for r in self.model.reactions]).T
+        elif samples.shape[1] == len(self.model.variables):
+            S = prob.equalities
+            b = prob.b
+            bounds = prob.variable_bounds
+        else:
+            raise ValueError("Wrong number of columns. samples must have a "
+                             "column for each flux or variable defined in the "
+                             "model!")
+
+        feasibility = np.abs(S.dot(samples.T).T - b)
+        feasibility = feasibility.max(axis=1)
+        lb_error = (samples - bounds[0, ]).min(axis=1)
+        ub_error = (bounds[1, ] - samples).min(axis=1)
+
+        if (samples.shape[1] == len(self.model.variables) and
+                prob.inequalities.shape[0]):
+            consts = prob.inequalities.dot(samples.T)
+            lb_error = np.minimum(
+                lb_error,
+                (consts - prob.bounds[0, ]).min(axis=1))
+            ub_error = np.minimum(
+                ub_error,
+                (prob.bounds[1, ] - consts).min(axis=1)
+            )
+
+        valid = (
+            (feasibility < feasibility_tol) &
+            (lb_error > -bounds_tol) &
+            (ub_error > -bounds_tol))
+        codes = np.repeat("", valid.shape[0]).astype(np.dtype((str, 3)))
+        codes[valid] = "v"
+        codes[lb_error <= -bounds_tol] = np.char.add(
+            codes[lb_error <= -bounds_tol], "l")
+        codes[ub_error <= -bounds_tol] = np.char.add(
+            codes[ub_error <= -bounds_tol], "u")
+        codes[feasibility > feasibility_tol] = np.char.add(
+            codes[feasibility > feasibility_tol], "e")
+        return codes
+
+
class ACHRSampler(HRSampler):
    """Artificial Centering Hit-and-Run sampler.

    A sampler with low memory footprint and good convergence.

    Parameters
    ----------
    model : a cobra model
        The cobra model from which to generate samples.
    thinning : int, optional
        The thinning factor of the generated sampling chain. A thinning of 10
        means samples are returned every 10 steps.
    seed : positive integer, optional
        Sets the random number seed. Initialized to the current time stamp if
        None.

    Attributes
    ----------
    model : cobra.Model
        The cobra model from which the samples get generated.
    thinning : int
        The currently used thinning factor.
    n_samples : int
        The total number of samples that have been generated by this
        sampler instance.
    problem : collections.namedtuple
        A python object whose attributes define the entire sampling problem in
        matrix form. See docstring of `Problem`.
    warmup : a numpy matrix
        A matrix with as many columns as reactions in the model and more
        than 3 rows containing a warmup sample in each row. None if no warmup
        points have been generated yet.
    seed : positive integer, optional
        Sets the random number seed. Initialized to the current time stamp if
        None.
    fwd_idx : np.array
        Has one entry for each reaction in the model containing the index of
        the respective forward variable.
    rev_idx : np.array
        Has one entry for each reaction in the model containing the index of
        the respective reverse variable.
    prev : numpy array
        The current/last flux sample generated.
    center : numpy array
        The center of the sampling space as estimated by the mean of all
        previously generated samples.

    Notes
    -----
    ACHR generates samples by choosing new directions from the sampling space's
    center and the warmup points. The implementation used here is the same
    as in the Matlab Cobra Toolbox [2]_ and uses only the initial warmup points
    to generate new directions and not any other previous iterates. This
    usually gives better mixing since the startup points are chosen to span
    the space in a wide manner. This also makes the generated sampling chain
    quasi-markovian since the center converges rapidly.

    Memory usage is roughly in the order of (2 * number reactions)^2
    due to the required nullspace matrices and warmup points. So large
    models easily take up a few GB of RAM.

    References
    ----------
    .. [1] Direction Choice for Accelerated Convergence in Hit-and-Run Sampling
       David E. Kaufman Robert L. Smith
       Operations Research 199846:1 , 84-95
       https://doi.org/10.1287/opre.46.1.84
    .. [2] https://github.com/opencobra/cobratoolbox
    """

    def __init__(self, model, thinning=100, seed=None):
        """Initialize a new ACHRSampler."""
        super(ACHRSampler, self).__init__(model, thinning, seed=seed)
        self.generate_fva_warmup()
        # Start the chain from the centroid of the warmup points.
        self.prev = self.center = self.warmup.mean(axis=0)
        # NOTE(review): this seeds numpy's *global* RNG state, so it also
        # affects other users of np.random in the same process.
        np.random.seed(self._seed)

    def __single_iteration(self):
        # Pick a random warmup point to define the step direction.
        pi = np.random.randint(self.n_warmup)
        # mix in the original warmup points to not get stuck
        delta = self.warmup[pi, ] - self.center
        self.prev = _step(self, self.prev, delta)
        # Periodically re-project the iterate back onto the equality
        # constraints to counter numerical drift. Only applied to
        # homogeneous problems; `nproj` is a module-level constant.
        if self.problem.homogeneous and (self.n_samples *
                                         self.thinning % nproj == 0):
            self.prev = self._reproject(self.prev)
        # Running mean over all iterates — the "artificial center".
        self.center = (self.n_samples * self.center + self.prev) / (
                       self.n_samples + 1)
        self.n_samples += 1

    def sample(self, n, fluxes=True):
        """Generate a set of samples.

        This is the basic sampling function for all hit-and-run samplers.

        Parameters
        ----------
        n : int
            The number of samples that are generated at once.
        fluxes : boolean
            Whether to return fluxes or the internal solver variables. If set
            to False will return a variable for each forward and backward flux
            as well as all additional variables you might have defined in the
            model.

        Returns
        -------
        pandas.DataFrame
            Returns a data frame with `n` rows, each containing a flux sample.

        Notes
        -----
        Performance of this function linearly depends on the number
        of reactions in your model and the thinning factor.
        """
        samples = np.zeros((n, self.warmup.shape[1]))
        # Advance the chain thinning * n steps, keeping every thinning-th
        # iterate as one sample.
        for i in range(1, self.thinning * n + 1):
            self.__single_iteration()
            if i % self.thinning == 0:
                samples[i//self.thinning - 1, ] = self.prev

        if fluxes:
            # Net flux = forward variable minus reverse variable.
            names = [r.id for r in self.model.reactions]
            return pandas.DataFrame(
                samples[:, self.fwd_idx] - samples[:, self.rev_idx],
                columns=names)
        else:
            names = [v.name for v in self.model.variables]
            return pandas.DataFrame(samples, columns=names)
+
+
# Unfortunately this has to be outside the class to be usable with
# multiprocessing :(
def _sample_chain(args):
    """Sample a single chain for OptGPSampler.

    Runs in a worker process. `sampler` is a module-level global that is
    presumably installed by the multiprocessing initializer (`mp_init`)
    before this function is called — TODO confirm against `mp_init`.

    center and n_samples are updated locally and forgotten afterwards.
    """
    n, idx = args       # has to be this way to work in Python 2.7
    center = sampler.center
    # De-correlate chains: each worker derives its own seed from its index.
    np.random.seed((sampler._seed + idx) % np.iinfo(np.int32).max)
    pi = np.random.randint(sampler.n_warmup)
    prev = sampler.warmup[pi, ]
    # Pull the starting point towards the center (factor 0.95 — presumably
    # the step fraction accepted by `_step`).
    prev = _step(sampler, center, prev - center, 0.95)
    n_samples = max(sampler.n_samples, 1)
    samples = np.zeros((n, center.shape[0]))

    for i in range(1, sampler.thinning * n + 1):
        # New direction: towards a random warmup point, relative to center.
        pi = np.random.randint(sampler.n_warmup)
        delta = sampler.warmup[pi, ] - center

        prev = _step(sampler, prev, delta)
        # Periodically re-project onto the equality constraints to counter
        # numerical drift (homogeneous problems only; `nproj` is a
        # module-level constant).
        if sampler.problem.homogeneous and (n_samples *
                                            sampler.thinning % nproj == 0):
            prev = sampler._reproject(prev)
        # Keep every thinning-th iterate and update the local center.
        if i % sampler.thinning == 0:
            samples[i//sampler.thinning - 1, ] = prev
            center = (n_samples * center + prev) / (n_samples + 1)
            n_samples += 1

    return samples
+
+
class OptGPSampler(HRSampler):
    """A parallel optimized sampler.

    A parallel sampler with fast convergence and parallel execution. See [1]_
    for details.

    Parameters
    ----------
    model : cobra.Model
        The cobra model from which to generate samples.
    processes: int
        The number of processes used during sampling.
    thinning : int, optional
        The thinning factor of the generated sampling chain. A thinning of 10
        means samples are returned every 10 steps.
    seed : positive integer, optional
        Sets the random number seed. Initialized to the current time stamp if
        None.

    Attributes
    ----------
    model : cobra.Model
        The cobra model from which the samples get generated.
    thinning : int
        The currently used thinning factor.
    n_samples : int
        The total number of samples that have been generated by this
        sampler instance.
    problem : collections.namedtuple
        A python object whose attributes define the entire sampling problem in
        matrix form. See docstring of `Problem`.
    warmup : a numpy matrix
        A matrix with as many columns as reactions in the model and more
        than 3 rows containing a warmup sample in each row. None if no warmup
        points have been generated yet.
    seed : positive integer, optional
        Sets the random number seed. Initialized to the current time stamp if
        None.
    fwd_idx : np.array
        Has one entry for each reaction in the model containing the index of
        the respective forward variable.
    rev_idx : np.array
        Has one entry for each reaction in the model containing the index of
        the respective reverse variable.
    prev : numpy.array
        The current/last flux sample generated.
    center : numpy.array
        The center of the sampling space as estimated by the mean of all
        previously generated samples.

    Notes
    -----
    The sampler is very similar to artificial centering where each process
    samples its own chain. Initial points are chosen randomly from the warmup
    points followed by a linear transformation that pulls the points a little
    bit towards the center of the sampling space.

    If the number of processes used is larger than one the requested
    number of samples is adjusted to the smallest multiple of the number of
    processes larger than the requested sample number. For instance, if you
    have 3 processes and request 8 samples you will receive 9.

    Memory usage is roughly in the order of (2 * number reactions)^2
    due to the required nullspace matrices and warmup points. So large
    models easily take up a few GB of RAM. However, most of the large matrices
    are kept in shared memory. So the RAM usage is independent of the number
    of processes.

    References
    ----------
    .. [1] Megchelenbrink W, Huynen M, Marchiori E (2014)
       optGpSampler: An Improved Tool for Uniformly Sampling the Solution-Space
       of Genome-Scale Metabolic Networks.
       PLoS ONE 9(2): e86587.
       https://doi.org/10.1371/journal.pone.0086587
    """

    def __init__(self, model, processes, thinning=100, seed=None):
        """Initialize a new OptGPSampler."""
        super(OptGPSampler, self).__init__(model, thinning, seed=seed)
        self.generate_fva_warmup()
        # Number of worker processes (the attribute name `np` is unrelated
        # to numpy's conventional alias).
        self.np = processes

        # This maps our saved center into shared memory,
        # meaning they are synchronized across processes
        self.center = shared_np_array((len(model.variables), ),
                                      self.warmup.mean(axis=0))

    def sample(self, n, fluxes=True):
        """Generate a set of samples.

        This is the basic sampling function for all hit-and-run samplers.

        Parameters
        ----------
        n : int
            The minimum number of samples that are generated at once
            (see Notes).
        fluxes : boolean
            Whether to return fluxes or the internal solver variables. If set
            to False will return a variable for each forward and backward flux
            as well as all additional variables you might have defined in the
            model.

        Returns
        -------
        pandas.DataFrame
            Returns a data frame with `n` rows, each containing a flux sample.

        Notes
        -----
        Performance of this function linearly depends on the number
        of reactions in your model and the thinning factor.

        If the number of processes is larger than one, computation is split
        across the CPUs of your machine. This may shorten computation time.
        However, there is also overhead in setting up parallel computation so
        we recommend to calculate large numbers of samples at once
        (`n` > 1000).
        """
        if self.np > 1:
            # Round n up to a multiple of the process count so that each
            # worker samples the same number of points.
            n_process = np.ceil(n / self.np).astype(int)
            n = n_process * self.np
            # The cast to list is weird but not doing it gives recursion
            # limit errors, something weird going on with multiprocessing
            args = list(zip([n_process] * self.np, range(self.np)))
            # No with statement or starmap here since Python 2.x
            # does not support it :(
            mp = Pool(self.np, initializer=mp_init, initargs=(self,))
            chains = mp.map(_sample_chain, args, chunksize=1)
            mp.close()
            mp.join()
            chains = np.vstack(chains)
        else:
            mp_init(self)
            chains = _sample_chain((n, 0))

        # Update the global center
        # NOTE(review): this rebinds `self.center` to a freshly allocated
        # array instead of writing into the shared array created in
        # __init__ — verify that subsequent parallel `sample` calls still
        # see a synchronized center.
        self.center = (self.n_samples * self.center +
                       n * np.atleast_2d(chains).mean(axis=0)) / (
                       self.n_samples + n)
        self.n_samples += n

        if fluxes:
            # Net flux = forward variable minus reverse variable.
            names = [r.id for r in self.model.reactions]
            return pandas.DataFrame(
                chains[:, self.fwd_idx] - chains[:, self.rev_idx],
                columns=names)
        else:
            names = [v.name for v in self.model.variables]
            return pandas.DataFrame(chains, columns=names)

    # Models can be large so don't pass them around during multiprocessing
    def __getstate__(self):
        """Return the pickled state without the (potentially large) model."""
        d = dict(self.__dict__)
        del d['model']
        return d
+
+
def sample(model, n, method="optgp", thinning=100, processes=1, seed=None):
    """Sample valid flux distributions from a cobra model.

    Two sampling algorithms are supported:

    1. 'optgp' (default), which uses the OptGPSampler and supports parallel
       sampling [1]_. It needs large sample numbers to be performant
       (n > 1000); for fewer samples 'achr' might be better suited.
    2. 'achr', artificial centering hit-and-run, a single-process method
       with good convergence [2]_.

    Parameters
    ----------
    model : cobra.Model
        The model from which to sample flux distributions.
    n : int
        The number of samples to obtain. When using 'optgp' this must be a
        multiple of `processes`, otherwise a larger number of samples will be
        returned.
    method : str, optional
        The sampling algorithm to use ('optgp' or 'achr').
    thinning : int, optional
        The thinning factor of the generated sampling chain. A thinning of 10
        means samples are returned every 10 steps. Defaults to 100 which in
        benchmarks gives approximately uncorrelated samples. If set to one
        will return all iterates.
    processes : int, optional
        Only used for 'optgp'. The number of processes used to generate
        samples.
    seed : positive integer, optional
        The random number seed to be used. Initialized to current time stamp
        if None.

    Returns
    -------
    pandas.DataFrame
        The generated flux samples. Each row corresponds to a sample of the
        fluxes and the columns are the reactions.

    Notes
    -----
    The samplers have a correction method to ensure equality feasibility for
    long-running chains, however this will only work for homogeneous models,
    meaning models with no non-zero fixed variables or constraints (
    right-hand side of the equalities are zero).

    References
    ----------
    .. [1] Megchelenbrink W, Huynen M, Marchiori E (2014)
       optGpSampler: An Improved Tool for Uniformly Sampling the Solution-Space
       of Genome-Scale Metabolic Networks.
       PLoS ONE 9(2): e86587.
    .. [2] Direction Choice for Accelerated Convergence in Hit-and-Run Sampling
       David E. Kaufman Robert L. Smith
       Operations Research 199846:1 , 84-95
    """
    if method == "achr":
        chain_sampler = ACHRSampler(model, thinning=thinning, seed=seed)
    elif method == "optgp":
        chain_sampler = OptGPSampler(model, processes,
                                     thinning=thinning, seed=seed)
    else:
        raise ValueError("method must be 'optgp' or 'achr'!")

    reaction_ids = [rxn.id for rxn in model.reactions]
    return pandas.DataFrame(columns=reaction_ids,
                            data=chain_sampler.sample(n))
diff --git a/cobra/flux_analysis/single_deletion.py b/cobra/flux_analysis/single_deletion.py
index f1b3027..b4b06f4 100644
--- a/cobra/flux_analysis/single_deletion.py
+++ b/cobra/flux_analysis/single_deletion.py
@@ -1,43 +1,57 @@
-from warnings import warn
+# -*- coding: utf-8 -*-
 
-from six import string_types, iteritems
+"""Bundles functions for successively deleting a set of genes or reactions."""
 
-from ..manipulation import delete_model_genes, undelete_model_genes
-from ..manipulation.delete import find_gene_knockout_reactions
-from ..solvers import solver_dict, get_solver_name
+from __future__ import absolute_import
+
+import pandas
+from six import iteritems, string_types
+from optlang.interface import OPTIMAL
+
+import cobra.solvers as legacy_solvers
+import cobra.util.solver as solvers
+from cobra.manipulation import delete_model_genes, undelete_model_genes
+from cobra.manipulation.delete import find_gene_knockout_reactions
+
+# this can be removed after deprecation of the old solver interface
+# since the optlang version requires neither numpy nor scipy
 try:
-    import scipy
+    import scipy  # noqa
 except ImportError:
     moma = None
 else:
-    from . import moma
-
-
-def single_deletion(cobra_model, element_list=None,
-                    element_type='gene', **kwargs):
-    """Wrapper for single_gene_deletion and single_reaction_deletion
-
-    .. deprecated :: 0.4
-        Use single_reaction_deletion and single_gene_deletion
-    """
-    warn("deprecated - use single_reaction_deletion and single_gene_deletion")
-    if element_type == "reaction":
-        return single_reaction_deletion(cobra_model, element_list, **kwargs)
-    elif element_type == "gene":
-        return single_gene_deletion(cobra_model, element_list, **kwargs)
-    else:
-        raise Exception("unknown element type")
+    from cobra.flux_analysis import moma
 
 
 def single_reaction_deletion(cobra_model, reaction_list=None, solver=None,
                              method="fba", **solver_args):
-    """sequentially knocks out each reaction in a model
-
-    reaction_list: list of reaction_ids or cobra.Reaction
-
-    method: "fba" or "moma"
-
-    returns ({reaction_id: growth_rate}, {reaction_id: status})"""
+    """Sequentially knocks out each reaction from a given reaction list.
+
+    Parameters
+    ----------
+    cobra_model : cobra.Model
+        The model from which to delete the reactions. The model will not be
+        modified.
+    reaction_list : iterable
+        List of reaction IDs or cobra.Reaction. If None (default) will use all
+        reactions in the model.
+    method : str, optional
+        The method used to obtain fluxes. Must be one of "fba", "moma" or
+        "linear moma".
+    solver : str, optional
+        Name of the solver to be used.
+    solver_args : optional
+        Additional arguments for the solver. Ignored for optlang solver, please
+        use `model.solver.configuration` instead.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Data frame with two columns and reaction id as index:
+        - flux: the value of the objective after the knockout
+        - status: the solution's status, (for instance "optimal" for each
+          knockout)
+    """
     if reaction_list is None:
         reaction_list = cobra_model.reactions
     else:
@@ -45,80 +59,183 @@ def single_reaction_deletion(cobra_model, reaction_list=None, solver=None,
                          if isinstance(i, string_types) else i
                          for i in reaction_list]
     if method == "fba":
-        return single_reaction_deletion_fba(cobra_model, reaction_list,
-                                            solver=solver, **solver_args)
+        result = single_reaction_deletion_fba(cobra_model, reaction_list,
+                                              solver=solver, **solver_args)
     elif method == "moma":
-        return single_reaction_deletion_moma(cobra_model, reaction_list,
-                                             solver=solver, **solver_args)
+        result = single_reaction_deletion_moma(cobra_model, reaction_list,
+                                               solver=solver, **solver_args)
+    elif method == "linear moma":
+        result = single_reaction_deletion_moma(cobra_model, reaction_list,
+                                               linear=True, solver=solver,
+                                               **solver_args)
     else:
         raise ValueError("Unknown deletion method '%s'" % method)
+    return pandas.DataFrame({'flux': result[0], 'status': result[1]})
 
 
 def single_reaction_deletion_fba(cobra_model, reaction_list, solver=None,
                                  **solver_args):
-    """sequentially knocks out each reaction in a model using FBA
-
-    reaction_list: list of reaction_ids or cobra.Reaction
-
-    method: "fba" or "moma"
-
-    returns ({reaction_id: growth_rate}, {reaction_id: status})"""
-
-    solver = solver_dict[get_solver_name() if solver is None else solver]
-    lp = solver.create_problem(cobra_model)
+    """Sequentially knocks out each reaction in a model using FBA.
+
+    Not supposed to be called directly, use
+    `single_reaction_deletion(..., method="fba")` instead.
+
+    Parameters
+    ----------
+    cobra_model : cobra.Model
+        The model from which to delete the reactions. The model will not be
+        modified.
+    reaction_list : iterable
+        List of reaction Ids or cobra.Reaction.
+    solver: str, optional
+        The name of the solver to be used.
+
+    Returns
+    -------
+    tuple of dicts
+        A tuple ({reaction_id: growth_rate}, {reaction_id: status})
+    """
+    legacy = False
+    if solver is None:
+        solver = cobra_model.solver
+    elif "optlang-" in solver:
+        solver = solvers.interface_to_str(solver)
+        solver = solvers.solvers[solver]
+    else:
+        legacy = True
+        solver = legacy_solvers.solver_dict[solver]
+        lp = solver.create_problem(cobra_model)
 
     growth_rate_dict = {}
     status_dict = {}
-    for reaction in reaction_list:
-        old_bounds = (reaction.lower_bound, reaction.upper_bound)
-        index = cobra_model.reactions.index(reaction)
-        solver.change_variable_bounds(lp, index, 0., 0.)
-        solver.solve_problem(lp, **solver_args)
-        # get the status and growth rate
-        status = solver.get_status(lp)
-        status_dict[reaction.id] = status
-        growth_rate_dict[reaction.id] = solver.get_objective_value(lp) \
-            if status == "optimal" else 0.
-        # reset the problem
-        solver.change_variable_bounds(lp, index, old_bounds[0], old_bounds[1])
-    return (growth_rate_dict, status_dict)
 
-
-def single_reaction_deletion_moma(cobra_model, reaction_list, solver=None,
-                                  **solver_args):
-    """sequentially knocks out each reaction in a model using MOMA
-
-    reaction_list: list of reaction_ids or cobra.Reaction
-
-
-    returns ({reaction_id: growth_rate}, {reaction_id: status})"""
+    if not legacy:
+        with cobra_model as m:
+            m.solver = solver
+            for reaction in reaction_list:
+                with m:
+                    reaction.knock_out()
+                    obj_value = m.slim_optimize()
+                    status_dict[reaction.id] = m.solver.status
+                    growth_rate_dict[reaction.id] = obj_value
+    else:
+        # This entire block can be removed once the legacy solvers are
+        # deprecated
+        for reaction in reaction_list:
+            old_bounds = (reaction.lower_bound, reaction.upper_bound)
+            index = cobra_model.reactions.index(reaction)
+            solver.change_variable_bounds(lp, index, 0., 0.)
+            solver.solve_problem(lp, **solver_args)
+            # get the status and growth rate
+            status = solver.get_status(lp)
+            status_dict[reaction.id] = status
+            growth_rate_dict[reaction.id] = solver.get_objective_value(lp) \
+                if status == "optimal" else 0.
+            # reset the problem
+            solver.change_variable_bounds(lp, index, old_bounds[0],
+                                          old_bounds[1])
+    return growth_rate_dict, status_dict
+
+
+def single_reaction_deletion_moma(cobra_model, reaction_list, linear=False,
+                                  solver=None, **solver_args):
+    """Sequentially knocks out each reaction in a model using MOMA.
+
+    Not supposed to be called directly, use
+    `single_reaction_deletion(..., method="moma")` instead.
+
+    Parameters
+    ----------
+    cobra_model : cobra.Model
+        The model from which to delete the reactions. The model will not be
+        modified.
+    reaction_list : iterable
+        List of reaction IDs or cobra.Reaction.
+    linear : bool
+        Whether to use linear MOMA.
+    solver: str, optional
+        The name of the solver to be used.
+
+    Returns
+    -------
+    tuple of dicts
+        A tuple ({reaction_id: growth_rate}, {reaction_id: status})
+    """
     # The same function can not be used because MOMA can not re-use the
     # same LP object. Problem re-use leads to incorrect solutions.
+    # This is *not* true for optlang solvers!
     if moma is None:
         raise RuntimeError("scipy required for moma")
-    solver = solver_dict[solver if solver else get_solver_name(qp=True)]
-    moma_model, moma_objective = moma.create_euclidian_moma_model(cobra_model)
+
+    legacy = False
+    if solver is None:
+        solver = cobra_model.solver
+    elif "optlang-" in solver:
+        solver = solvers.interface_to_str(solver)
+        solver = solvers.solvers[solver]
+    else:
+        legacy = True
+        solver = legacy_solvers.solver_dict[solver]
+        moma_model, moma_objective = moma.create_euclidian_moma_model(
+            cobra_model)
 
     growth_rate_dict = {}
     status_dict = {}
-    for reaction in reaction_list:
-        index = cobra_model.reactions.index(reaction)
-        solution = moma.moma_knockout(moma_model, moma_objective, (index,),
-                                      solver=solver, **solver_args)
-        status_dict[reaction.id] = solution.status
-        growth_rate_dict[reaction.id] = solution.f
-    return (growth_rate_dict, status_dict)
+
+    if not legacy:
+        solution = cobra_model.optimize()
+        with cobra_model as m:
+            m.solver = solver
+            moma.add_moma(m, solution=solution, linear=linear)
+            for reaction in reaction_list:
+                with m:
+                    reaction.knock_out()
+                    status = m.solver.optimize()
+                    status_dict[reaction.id] = status
+                    if status == OPTIMAL:
+                        growth = m.variables.moma_old_objective.primal
+                    else:
+                        growth = float("nan")
+                    growth_rate_dict[reaction.id] = growth
+    else:
+        for reaction in reaction_list:
+            index = cobra_model.reactions.index(reaction)
+            solution = moma.moma_knockout(moma_model, moma_objective, (index,),
+                                          solver=solver, **solver_args)
+            status_dict[reaction.id] = solution.status
+            growth_rate_dict[reaction.id] = solution.f
+    return growth_rate_dict, status_dict
 
 
 def single_gene_deletion(cobra_model, gene_list=None, solver=None,
                          method="fba", **solver_args):
-    """sequentially knocks out each gene in a model
-
-    gene_list: list of gene_ids or cobra.Gene
-
-    method: "fba" or "moma"
-
-    returns ({gene_id: growth_rate}, {gene_id: status})"""
+    """Sequentially knocks out each gene from a given gene list.
+
+    Parameters
+    ----------
+    cobra_model : a cobra model
+        The model from which to delete the genes. The model will not be
+        modified.
+    gene_list : iterable
+        List of gene IDs or cobra.Gene. If None (default) will use all genes in
+        the model.
+    method : str, optional
+        The method used to obtain fluxes. Must be one of "fba", "moma" or
+        "linear moma".
+    solver : str, optional
+        Name of the solver to be used.
+    solver_args : optional
+        Additional arguments for the solver. Ignored for optlang solver, please
+        use `model.solver.configuration` instead.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Data frame with two columns and reaction id as index:
+        - flux: the value of the objective after the knockout
+        - status: the solution's status, (for instance "optimal" for each
+          knockout)
+    """
     if gene_list is None:
         gene_list = cobra_model.genes
     else:
@@ -126,56 +243,141 @@ def single_gene_deletion(cobra_model, gene_list=None, solver=None,
                      if isinstance(i, string_types) else i for i in gene_list]
 
     if method == "fba":
-        return single_gene_deletion_fba(cobra_model, gene_list,
-                                        solver=solver, **solver_args)
+        result = single_gene_deletion_fba(cobra_model, gene_list,
+                                          solver=solver, **solver_args)
     elif method == "moma":
-        return single_gene_deletion_moma(cobra_model, gene_list,
-                                         solver=solver, **solver_args)
+        result = single_gene_deletion_moma(cobra_model, gene_list,
+                                           solver=solver, **solver_args)
+    elif method == "linear moma":
+        result = single_gene_deletion_moma(cobra_model, gene_list, linear=True,
+                                           solver=solver, **solver_args)
     else:
         raise ValueError("Unknown deletion method '%s'" % method)
+    return pandas.DataFrame({'flux': result[0], 'status': result[1]})
 
 
 def single_gene_deletion_fba(cobra_model, gene_list, solver=None,
                              **solver_args):
-
-    solver = solver_dict[get_solver_name() if solver is None else solver]
-    lp = solver.create_problem(cobra_model)
+    """Sequentially knocks out each gene in a model using FBA.
+
+    Not supposed to be called directly; use
+    `single_gene_deletion(..., method="fba")` instead.
+
+    Parameters
+    ----------
+    gene_list : iterable
+        List of gene IDs or cobra.Gene.
+    solver: str, optional
+        The name of the solver to be used.
+
+    Returns
+    -------
+    tuple of dicts
+        A tuple ({gene_id: growth_rate}, {gene_id: status})
+    """
+    legacy = False
+    if solver is None:
+        solver = cobra_model.solver
+    elif "optlang-" in solver:
+        solver = solvers.interface_to_str(solver)
+        solver = solvers.solvers[solver]
+    else:
+        legacy = True
+        solver = legacy_solvers.solver_dict[solver]
+        lp = solver.create_problem(cobra_model)
 
     growth_rate_dict = {}
     status_dict = {}
-    for gene in gene_list:
-        old_bounds = {}
-        for reaction in find_gene_knockout_reactions(cobra_model, [gene]):
-            index = cobra_model.reactions.index(reaction)
-            old_bounds[index] = (reaction.lower_bound, reaction.upper_bound)
-            solver.change_variable_bounds(lp, index, 0., 0.)
-        solver.solve_problem(lp, **solver_args)
-        # get the status and growth rate
-        status = solver.get_status(lp)
-        status_dict[gene.id] = status
-        growth_rate = solver.get_objective_value(lp) \
-            if status == "optimal" else 0.
-        growth_rate_dict[gene.id] = growth_rate
-        # reset the problem
-        for index, bounds in iteritems(old_bounds):
-            solver.change_variable_bounds(lp, index, bounds[0], bounds[1])
-    return (growth_rate_dict, status_dict)
-
-
-def single_gene_deletion_moma(cobra_model, gene_list, solver=None,
-                              **solver_args):
+
+    if not legacy:
+        with cobra_model as m:
+            m.solver = solver
+            for gene in gene_list:
+                with m:
+                    gene.knock_out()
+                    obj_value = m.slim_optimize()
+                    status_dict[gene.id] = m.solver.status
+                    growth_rate_dict[gene.id] = obj_value
+    else:
+        for gene in gene_list:
+            old_bounds = {}
+            for reaction in find_gene_knockout_reactions(cobra_model, [gene]):
+                index = cobra_model.reactions.index(reaction)
+                old_bounds[index] = reaction.bounds
+                solver.change_variable_bounds(lp, index, 0., 0.)
+            solver.solve_problem(lp, **solver_args)
+            # get the status and growth rate
+            status = solver.get_status(lp)
+            status_dict[gene.id] = status
+            growth_rate = solver.get_objective_value(lp) \
+                if status == "optimal" else 0.
+            growth_rate_dict[gene.id] = growth_rate
+            # reset the problem
+            for index, bounds in iteritems(old_bounds):
+                solver.change_variable_bounds(lp, index, bounds[0], bounds[1])
+    return growth_rate_dict, status_dict
+
+
+def single_gene_deletion_moma(cobra_model, gene_list, linear=False,
+                              solver=None, **solver_args):
+    """Sequentially knocks out each gene in a model using MOMA.
+
+    Not supposed to be called directly; use
+    `single_gene_deletion(..., method="moma")` instead.
+
+    Parameters
+    ----------
+    gene_list : iterable
+        List of gene IDs or cobra.Gene.
+    linear : bool
+        Whether to use linear MOMA.
+    solver : str, optional
+        The name of the solver to be used.
+
+    Returns
+    -------
+    tuple of dicts
+        A tuple ({gene_id: growth_rate}, {gene_id: status})
+    """
     if moma is None:
         raise RuntimeError("scipy required for moma")
-    solver = solver if solver else get_solver_name(qp=True)
-    moma_model, moma_objective = moma.create_euclidian_moma_model(cobra_model)
+
+    legacy = False
+    if solver is None:
+        solver = cobra_model.solver
+    elif "optlang-" in solver:
+        solver = solvers.interface_to_str(solver)
+        solver = solvers.solvers[solver]
+    else:
+        legacy = True
+        solver = legacy_solvers.solver_dict[solver]
+        moma_model, moma_objective = moma.create_euclidian_moma_model(
+            cobra_model)
 
     growth_rate_dict = {}
     status_dict = {}
-    for gene in gene_list:
-        delete_model_genes(moma_model, [gene.id])
-        solution = moma.solve_moma_model(moma_model, moma_objective,
-                                         solver=solver, **solver_args)
-        status_dict[gene.id] = solution.status
-        growth_rate_dict[gene.id] = solution.f
-        undelete_model_genes(moma_model)
-    return (growth_rate_dict, status_dict)
+
+    if not legacy:
+        solution = cobra_model.optimize()
+        with cobra_model as m:
+            m.solver = solver
+            moma.add_moma(m, solution=solution, linear=linear)
+            for gene in gene_list:
+                with m:
+                    gene.knock_out()
+                    status = m.solver.optimize()
+                    status_dict[gene.id] = status
+                    if status == OPTIMAL:
+                        growth = m.variables.moma_old_objective.primal
+                    else:
+                        growth = float("nan")
+                    growth_rate_dict[gene.id] = growth
+    else:
+        for gene in gene_list:
+            delete_model_genes(moma_model, [gene.id])
+            solution = moma.solve_moma_model(moma_model, moma_objective,
+                                             solver=solver, **solver_args)
+            status_dict[gene.id] = solution.status
+            growth_rate_dict[gene.id] = solution.f
+            undelete_model_genes(moma_model)
+    return growth_rate_dict, status_dict
diff --git a/cobra/flux_analysis/summary.py b/cobra/flux_analysis/summary.py
index f705e91..c75f4b6 100644
--- a/cobra/flux_analysis/summary.py
+++ b/cobra/flux_analysis/summary.py
@@ -1,10 +1,16 @@
-from six.moves import zip_longest
-from six import print_, iteritems
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
 
 import pandas as pd
+from numpy import zeros
+from six import iteritems, print_
+from six.moves import zip_longest
 from tabulate import tabulate
 
-from .variability import flux_variability_analysis
+from cobra.flux_analysis.variability import flux_variability_analysis
+from cobra.util.solver import linear_reaction_coefficients
+from cobra.core import get_solution
 
 
 def format_long_string(string, max_length):
@@ -14,53 +20,60 @@ def format_long_string(string, max_length):
     return string
 
 
-def metabolite_summary(met, threshold=0.01, fva=False, floatfmt='.3g',
-                       **solver_args):
+def metabolite_summary(met, solution=None, threshold=0.01, fva=False,
+                       floatfmt='.3g'):
     """Print a summary of the reactions which produce and consume this
     metabolite
 
-    threshold: float
-    a value below which to ignore reaction fluxes
+    solution : cobra.core.Solution
+        A previously solved model solution to use for generating the
+        summary. If none provided (default), the summary method will resolve
+        the model. Note that the solution object must match the model, i.e.,
+        changes to the model such as changed bounds, added or removed
+        reactions are not taken into account by this method.
 
-    fva: float (0->1), or None
-    Whether or not to include flux variability analysis in the output.
-    If given, fva should be a float between 0 and 1, representing the
-    fraction of the optimum objective to be searched.
+    threshold : float
+        a value below which to ignore reaction fluxes
 
-    floatfmt: string
+    fva : float (0->1), or None
+        Whether or not to include flux variability analysis in the output.
+        If given, fva should be a float between 0 and 1, representing the
+        fraction of the optimum objective to be searched.
+
+    floatfmt : string
         format method for floats, passed to tabulate. Default is '.3g'.
 
     """
+    if solution is None:
+        met.model.slim_optimize(error_value=None)
+        solution = get_solution(met.model, reactions=met.reactions)
 
-    def rxn_summary(r):
-        out = {
-            'id': format_long_string(r.id, 10),
-            'flux': r.x * r.metabolites[met],
-            'reaction': format_long_string(r.reaction, 40 if fva else 50),
-        }
-
-        if rxn_summary.fva_results is not False:
-            fmax = rxn_summary.fva_results.loc[r.id, 'maximum']
-            fmin = rxn_summary.fva_results.loc[r.id, 'minimum']
-            imax = r.metabolites[met] * fmax
-            imin = r.metabolites[met] * fmin
+    rxn_id = list()
+    flux = list()
+    reaction = list()
+    for rxn in met.reactions:
+        rxn_id.append(format_long_string(rxn.id, 10))
+        flux.append(solution.fluxes[rxn.id] * rxn.metabolites[met])
+        reaction.append(format_long_string(rxn.reaction, 40 if fva else 50))
 
-            # Correct 'max' and 'min' for negative values
-            out.update({
-                'fmin': imin if abs(imin) <= abs(imax) else imax,
-                'fmax': imax if abs(imin) <= abs(imax) else imin,
-            })
-
-        return out
+    flux_summary = pd.DataFrame(data={
+        "id": rxn_id, "flux": flux, "reaction": reaction})
 
     if fva:
-        rxn_summary.fva_results = pd.DataFrame(flux_variability_analysis(
-            met.model, met.reactions, fraction_of_optimum=fva,
-            **solver_args)).T
-    else:
-        rxn_summary.fva_results = False
+        fva_results = flux_variability_analysis(
+            met.model, met.reactions, fraction_of_optimum=fva)
+
+        flux_summary.index = flux_summary["id"]
+        flux_summary["maximum"] = zeros(len(rxn_id))
+        flux_summary["minimum"] = zeros(len(rxn_id))
+        for rid, rxn in zip(rxn_id, met.reactions):
+            imax = rxn.metabolites[met] * fva_results.loc[rxn.id, "maximum"]
+            imin = rxn.metabolites[met] * fva_results.loc[rxn.id, "minimum"]
+            flux_summary.loc[rid, "fmax"] = (imax if abs(imin) <= abs(imax)
+                                             else imin)
+            flux_summary.loc[rid, "fmin"] = (imin if abs(imin) <= abs(imax)
+                                             else imax)
 
-    flux_summary = pd.DataFrame((rxn_summary(r) for r in met.reactions))
     assert flux_summary.flux.sum() < 1E-6, "Error in flux balance"
 
     flux_summary = _process_flux_dataframe(flux_summary, fva, threshold,
@@ -108,49 +121,60 @@ def metabolite_summary(met, threshold=0.01, fva=False, floatfmt='.3g',
         pd.np.array(flux_table[2:])[~flux_summary.is_input.values]))
 
 
-def model_summary(model, threshold=1E-8, fva=None, floatfmt='.3g',
-                  **solver_args):
+def model_summary(model, solution=None, threshold=1E-8, fva=None,
+                  floatfmt='.3g'):
     """Print a summary of the input and output fluxes of the model.
 
-    threshold: float
+    solution : cobra.core.Solution
+        A previously solved model solution to use for generating the
+        summary. If none provided (default), the summary method will resolve
+        the model. Note that the solution object must match the model, i.e.,
+        changes to the model such as changed bounds, added or removed
+        reactions are not taken into account by this method.
+
+    threshold : float
         tolerance for determining if a flux is zero (not printed)
 
-    fva: int or None
+    fva : int or None
         Whether or not to calculate and report flux variability in the
         output summary
 
-    floatfmt: string
+    floatfmt : string
         format method for floats, passed to tabulate. Default is '.3g'.
 
     """
+    objective_reactions = linear_reaction_coefficients(model)
+    boundary_reactions = model.exchanges
+    summary_rxns = list(objective_reactions.keys()) + boundary_reactions
+
+    if solution is None:
+        model.slim_optimize(error_value=None)
+        solution = get_solution(model, reactions=summary_rxns)
 
     # Create a dataframe of objective fluxes
-    obj_fluxes = pd.DataFrame({key: key.x * value for key, value in
-                               iteritems(model.objective)}, index=['flux']).T
+    obj_fluxes = pd.DataFrame({key: solution.fluxes[key.id] * value for key,
+                               value in iteritems(objective_reactions)},
+                              index=['flux']).T
     obj_fluxes['id'] = obj_fluxes.apply(
         lambda x: format_long_string(x.name.id, 15), 1)
 
     # Build a dictionary of metabolite production from the boundary reactions
-    boundary_reactions = model.reactions.query(lambda x: x, 'boundary')
-
-    # Calculate FVA results if requested
-    if fva:
-        fva_results = pd.DataFrame(
-            flux_variability_analysis(model, reaction_list=boundary_reactions,
-                                      fraction_of_optimum=fva,
-                                      **solver_args)).T
-
     metabolite_fluxes = {}
     for rxn in boundary_reactions:
         for met, stoich in iteritems(rxn.metabolites):
             metabolite_fluxes[met] = {
                 'id': format_long_string(met.id, 15),
-                'flux': stoich * rxn.x}
+                'flux': stoich * solution.fluxes[rxn.id]}
 
-            if fva:
+    # Calculate FVA results if requested
+    if fva:
+        fva_results = flux_variability_analysis(
+            model, reaction_list=boundary_reactions, fraction_of_optimum=fva)
+
+        for rxn in boundary_reactions:
+            for met, stoich in iteritems(rxn.metabolites):
                 imin = stoich * fva_results.loc[rxn.id]['minimum']
                 imax = stoich * fva_results.loc[rxn.id]['maximum']
-
                 # Correct 'max' and 'min' for negative values
                 metabolite_fluxes[met].update({
                     'fmin': imin if abs(imin) <= abs(imax) else imax,
@@ -202,11 +226,13 @@ def _process_flux_dataframe(flux_dataframe, fva, threshold, floatfmt):
             (flux_dataframe.fmin.abs() > threshold) |
             (flux_dataframe.fmax.abs() > threshold)].copy()
 
+        flux_dataframe.loc[flux_dataframe.flux.abs() < threshold, 'flux'] = 0
+
     # Make all fluxes positive
     if not fva:
         flux_dataframe['is_input'] = flux_dataframe.flux >= 0
         flux_dataframe.flux = \
-            flux_dataframe.flux.abs().astype('float').round(6)
+            flux_dataframe.flux.abs().astype('float')
     else:
 
         def get_direction(flux, fmin, fmax):
@@ -220,7 +246,7 @@ def _process_flux_dataframe(flux_dataframe, fva, threshold, floatfmt):
                 return 1
             elif (fmax < 0) & (fmin >= 0):
                 return -1
-            elif ((fmax + fmin)/2) < 0:
+            elif ((fmax + fmin) / 2) < 0:
                 return -1
             else:
                 return 1
diff --git a/cobra/flux_analysis/variability.py b/cobra/flux_analysis/variability.py
index fd93ad3..f3e21b2 100644
--- a/cobra/flux_analysis/variability.py
+++ b/cobra/flux_analysis/variability.py
@@ -1,19 +1,118 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import pandas
+from sympy.core.singleton import S
 from warnings import warn
+from itertools import chain
+
+from cobra.flux_analysis.loopless import loopless_fva_iter
+from cobra.flux_analysis.parsimonious import add_pfba
+from cobra.flux_analysis.single_deletion import (single_gene_deletion,
+                                                 single_reaction_deletion)
+from cobra.core import get_solution
+from cobra.util import solver as sutil
+
+
def flux_variability_analysis(model, reaction_list=None, loopless=False,
                              fraction_of_optimum=1.0, pfba_factor=None,
                              solver=None, **solver_args):
    """Runs flux variability analysis to find the min/max flux values for
    each reaction in `reaction_list`.

    Parameters
    ----------
    model : a cobra model
        The model for which to run the analysis. It will *not* be modified.
    reaction_list : list of cobra.Reaction or str, optional
        The reactions for which to obtain min/max fluxes. If None will use
        all reactions in the model.
    loopless : boolean, optional
        Whether to return only loopless solutions. Ignored for legacy solvers,
        also see `Notes`.
    fraction_of_optimum : float, optional
        Must be <= 1.0. Requires that the objective value is at least
        fraction * max_objective_value. A value of 0.85 for instance means that
        the objective has to be at least at 85% percent of its maximum.
    pfba_factor : float, optional
        Add additional constraint to the model that the total sum of
        absolute fluxes must not be larger than this value times the
        smallest possible sum of absolute fluxes, i.e., by setting the value
        to 1.1 then the total sum of absolute fluxes must not be more than
        10% larger than the pfba solution. Since the pfba solution is the
        one that optimally minimizes the total flux sum, the pfba_factor
        should, if set, be larger than one. Setting this value may lead to
        more realistic predictions of the effective flux bounds. Only
        supported for optlang-based solvers.
    solver : str, optional
        Name of the solver to be used. If None it will respect the solver set
        in the model (model.solver).
    **solver_args : additional arguments for legacy solver, optional
        Additional arguments passed to the legacy solver. Ignored for
        optlang solver (those can be configured using
        model.solver.configuration).

    Returns
    -------
    pandas.DataFrame
        DataFrame with reaction identifier as the index columns

        - maximum: indicating the highest possible flux
        - minimum: indicating the lowest possible flux

    Raises
    ------
    ValueError
        If `pfba_factor` is given together with a legacy (non-optlang)
        solver.

    Notes
    -----
    This implements the fast version as described in [1]_. Please note that
    the flux distribution containing all minimal/maximal fluxes does not have
    to be a feasible solution for the model. Fluxes are minimized/maximized
    individually and a single minimal flux might require all others to be
    suboptimal.

    Using the loopless option will lead to a significant increase in
    computation time (about a factor of 100 for large models). However, the
    algorithm used here (see [2]_) is still more than 1000x faster than the
    "naive" version using `add_loopless(model)`. Also note that if you have
    included constraints that force a loop (for instance by setting all fluxes
    in a loop to be non-zero) this loop will be included in the solution.

    References
    ----------
    .. [1] Computationally efficient flux variability analysis.
       Gudmundsson S, Thiele I.
       BMC Bioinformatics. 2010 Sep 29;11:489.
       doi: 10.1186/1471-2105-11-489, PMID: 20920235

    .. [2] CycleFreeFlux: efficient removal of thermodynamically infeasible
       loops from flux distributions.
       Desouki AA, Jarre F, Gelius-Dietrich G, Lercher MJ.
       Bioinformatics. 2015 Jul 1;31(13):2159-65.
       doi: 10.1093/bioinformatics/btv096.
    """
    legacy, solver = sutil.choose_solver(model, solver)

    if reaction_list is None:
        reaction_list = model.reactions

    if not legacy:
        fva_result = _fva_optlang(model, reaction_list, fraction_of_optimum,
                                  loopless, pfba_factor)
    else:
        if pfba_factor is not None:
            # Bug fix: this ValueError was previously constructed but never
            # raised, so an unsupported pfba_factor was silently ignored for
            # legacy solvers.
            raise ValueError(
                'pfba_factor only supported for optlang interfaces')
        fva_result = _fva_legacy(model, reaction_list, fraction_of_optimum,
                                 "maximize", solver, **solver_args)
    return pandas.DataFrame(fva_result).T
 
 
-def flux_variability_analysis(cobra_model, reaction_list=None,
-                              fraction_of_optimum=1.0, solver=None,
-                              objective_sense="maximize", **solver_args):
+def _fva_legacy(cobra_model, reaction_list, fraction_of_optimum,
+                objective_sense, solver, **solver_args):
     """Runs flux variability analysis to find max/min flux values
 
     cobra_model : :class:`~cobra.core.Model`:
 
     reaction_list : list of :class:`~cobra.core.Reaction`: or their id's
         The id's for which FVA should be run. If this is None, the bounds
-        will be comptued for all reactions in the model.
+        will be computed for all reactions in the model.
 
     fraction_of_optimum : fraction of optimum which must be maintained.
         The original objective reaction is constrained to be greater than
@@ -23,12 +122,6 @@ def flux_variability_analysis(cobra_model, reaction_list=None,
         If None is given, the default solver will be used.
 
     """
-    if reaction_list is None and "the_reactions" in solver_args:
-        reaction_list = solver_args.pop("the_reactions")
-        warn("the_reactions is deprecated. Please use reaction_list=")
-    if reaction_list is None:
-        reaction_list = cobra_model.reactions
-    solver = solver_dict[get_solver_name() if solver is None else solver]
     lp = solver.create_problem(cobra_model)
     solver.solve_problem(lp, objective_sense=objective_sense)
     solution = solver.format_solution(lp, cobra_model)
@@ -50,11 +143,11 @@ def flux_variability_analysis(cobra_model, reaction_list=None,
 def calculate_lp_variability(lp, solver, cobra_model, reaction_list,
                              **solver_args):
     """calculate max and min of selected variables in an LP"""
-    fva_results = {str(r): {} for r in reaction_list}
+    fva_results = {r.id: {} for r in reaction_list}
     for what in ("minimum", "maximum"):
         sense = "minimize" if what == "minimum" else "maximize"
         for r in reaction_list:
-            r_id = str(r)
+            r_id = r.id
             i = cobra_model.reactions.index(r_id)
             solver.change_variable_objective(lp, i, 1.)
             solver.solve_problem(lp, objective_sense=sense, **solver_args)
@@ -64,33 +157,200 @@ def calculate_lp_variability(lp, solver, cobra_model, reaction_list,
     return fva_results
 
 
-def find_blocked_reactions(cobra_model, reaction_list=None,
def _fva_optlang(model, reaction_list, fraction, loopless, pfba_factor):
    """Helper function to perform FVA with the optlang interface.

    Parameters
    ----------
    model : a cobra model
    reaction_list : list of reactions
    fraction : float, optional
        Must be <= 1.0. Requires that the objective value is at least
        fraction * max_objective_value. A value of 0.85 for instance means that
        the objective has to be at least at 85% percent of its maximum.
    loopless : boolean, optional
        Whether to return only loopless solutions.
    pfba_factor : float, optional
        Add additional constraint to the model that the total sum of
        absolute fluxes must not be larger than this value times the
        smallest possible sum of absolute fluxes, i.e., by setting the value
        to 1.1 then the total sum of absolute fluxes must not be more than
        10% larger than the pfba solution. Setting this value may lead to
        more realistic predictions of the effective flux bounds.

    Returns
    -------
    dict
        A nested dictionary {reaction_id: {"minimum": flux, "maximum": flux}}
        with one entry per reaction in `reaction_list`.
    """
    prob = model.problem
    fva_results = {rxn.id: {} for rxn in reaction_list}
    # All solver/objective modifications below happen inside the model
    # context so they are reverted automatically when the scan is done.
    with model as m:
        # NOTE(review): error_value=None together with `message` presumably
        # makes slim_optimize raise on a non-optimal status — confirm.
        m.slim_optimize(error_value=None,
                        message="There is no optimal solution for the "
                                "chosen objective!")
        # Add objective as a variable to the model, then set the objective
        # to zero further below. This also uses the fraction to create the
        # lower bound for the old objective, i.e. the original objective may
        # not drop below fraction * optimum during the variability scan.
        fva_old_objective = prob.Variable(
            "fva_old_objective", lb=fraction * m.solver.objective.value)
        fva_old_obj_constraint = prob.Constraint(
            m.solver.objective.expression - fva_old_objective, lb=0, ub=0,
            name="fva_old_objective_constraint")
        m.add_cons_vars([fva_old_objective, fva_old_obj_constraint])

        if pfba_factor is not None:
            if pfba_factor < 1.:
                warn('pfba_factor should be larger or equal to 1', UserWarning)
            # The pfba objective is only needed to compute the minimal total
            # flux (ub); leaving the inner context reverts it, while the
            # flux_sum variable/constraint are re-added afterwards so the
            # flux-sum cap persists for the rest of the scan.
            with m:
                add_pfba(m, fraction_of_optimum=0)
                ub = m.slim_optimize(error_value=None)
                flux_sum = prob.Variable("flux_sum", ub=pfba_factor * ub)
                flux_sum_constraint = prob.Constraint(
                    m.solver.objective.expression - flux_sum, lb=0, ub=0,
                    name="flux_sum_constraint")
            m.add_cons_vars([flux_sum, flux_sum_constraint])

        m.objective = S.Zero  # This will trigger the reset as well
        for what in ("minimum", "maximum"):
            sense = "min" if what == "minimum" else "max"
            for rxn in reaction_list:
                r_id = rxn.id
                # Look the reaction up by id on m so the forward/reverse
                # variables used below belong to m's solver.
                rxn = m.reactions.get_by_id(r_id)
                # The previous objective assignment already triggers a reset
                # so directly update coefs here to not trigger redundant resets
                # in the history manager which can take longer than the actual
                # FVA for small models
                m.solver.objective.set_linear_coefficients(
                    {rxn.forward_variable: 1, rxn.reverse_variable: -1})
                m.solver.objective.direction = sense
                m.slim_optimize()
                sutil.check_solver_status(m.solver.status)
                if loopless:
                    value = loopless_fva_iter(m, rxn)
                else:
                    value = m.solver.objective.value
                fva_results[r_id][what] = value
                # Zero the coefficients again so the next reaction starts
                # from a clean (all-zero) objective.
                m.solver.objective.set_linear_coefficients(
                    {rxn.forward_variable: 0, rxn.reverse_variable: 0})

    return fva_results
+
+
+def find_blocked_reactions(model, reaction_list=None,
                            solver=None, zero_cutoff=1e-9,
                            open_exchanges=False, **solver_args):
     """Finds reactions that cannot carry a flux with the current
-    exchange reaction settings for cobra_model, using flux variability
+    exchange reaction settings for a cobra model, using flux variability
     analysis.
 
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to analyze
+    reaction_list : list
+        List of reactions to consider, use all if left missing
+    solver : string
+        The name of the solver to use
+    zero_cutoff : float
+        Flux value which is considered to effectively be zero.
+    open_exchanges : bool
+        If true, set bounds on exchange reactions to very high values to
+        avoid that being the bottle-neck.
+    **solver_args :
+        Additional arguments to the solver. Ignored for optlang based solvers.
+
+    Returns
+    -------
+    list
+        List with the blocked reactions
     """
-    if solver is None:
-        solver = get_solver_name()
-    if open_exchanges:
-        # should not unnecessarily change model
-        cobra_model = cobra_model.copy()
-        for reaction in cobra_model.reactions:
-            if reaction.boundary:
-                reaction.lower_bound = min(reaction.lower_bound, -1000)
-                reaction.upper_bound = max(reaction.upper_bound, 1000)
-    if reaction_list is None:
-        reaction_list = cobra_model.reactions
-    # limit to reactions which are already 0. If the reactions alread
-    # carry flux in this solution, then they can not be blocked.
-    solution = solver_dict[solver].solve(cobra_model, **solver_args)
-    reaction_list = [i for i in reaction_list
-                     if abs(solution.x_dict[i.id]) < zero_cutoff]
-    # run fva to find reactions where both max and min are 0
-    flux_span_dict = flux_variability_analysis(
-        cobra_model, fraction_of_optimum=0., reaction_list=reaction_list,
-        solver=solver, **solver_args)
-    return [k for k, v in iteritems(flux_span_dict)
-            if max(map(abs, v.values())) < zero_cutoff]
+    legacy, solver_interface = sutil.choose_solver(model, solver)
+    with model:
+        if open_exchanges:
+            for reaction in model.exchanges:
+                reaction.bounds = (min(reaction.lower_bound, -1000),
+                                   max(reaction.upper_bound, 1000))
+        if reaction_list is None:
+            reaction_list = model.reactions
+        # limit to reactions which are already 0. If the reactions already
+        # carry flux in this solution, then they can not be blocked.
+        if legacy:
+            solution = solver_interface.solve(model, **solver_args)
+            reaction_list = [i for i in reaction_list
+                             if abs(solution.x_dict[i.id]) < zero_cutoff]
+        else:
+            model.solver = solver_interface
+            model.slim_optimize()
+            solution = get_solution(model, reactions=reaction_list)
+            reaction_list = [rxn for rxn in reaction_list if
+                             abs(solution.fluxes[rxn.id]) < zero_cutoff]
+        # run fva to find reactions where both max and min are 0
+        flux_span = flux_variability_analysis(
+            model, fraction_of_optimum=0., reaction_list=reaction_list,
+            solver=solver, **solver_args)
+        return [rxn_id for rxn_id, min_max in flux_span.iterrows() if
+                max(abs(min_max)) < zero_cutoff]
+
+
def find_essential_genes(model, threshold=0.01):
    """Return the set of essential genes of a model.

    A gene counts as essential when restricting the flux of all reactions
    that depend on it to zero drives the objective (e.g. the growth rate)
    to zero as well.

    Parameters
    ----------
    model : cobra.Model
        The model to find the essential genes for.
    threshold : float (default 0.01)
        Minimal objective flux to be considered viable.

    Returns
    -------
    set
        Set of essential genes
    """
    # Only genes attached to reactions that carry flux in an optimal
    # solution can possibly be essential, so limit the knockouts to those.
    optimum = model.optimize(raise_error=True)
    feasibility_tol = model.solver.configuration.tolerances.feasibility
    active_reactions = [rxn_id for rxn_id, flux in optimum.fluxes.items()
                        if abs(flux) > feasibility_tol]
    candidate_genes = set()
    for rxn_id in active_reactions:
        candidate_genes.update(model.reactions.get_by_id(rxn_id).genes)
    knockouts = single_gene_deletion(model, gene_list=candidate_genes,
                                     method='fba')
    # A knockout is lethal when its objective value is undefined (the
    # problem became infeasible) or falls below the viability threshold.
    lethal = knockouts.flux.isnull() | (knockouts.flux < threshold)
    return set(model.genes.get_by_any(list(knockouts.index[lethal])))
+
+
def find_essential_reactions(model, threshold=0.01):
    """Return the set of essential reactions of a model.

    A reaction counts as essential when restricting its flux to zero
    drives the objective (e.g. the growth rate) to zero as well.

    Parameters
    ----------
    model : cobra.Model
        The model to find the essential reactions for.
    threshold : float (default 0.01)
        Minimal objective flux to be considered viable.

    Returns
    -------
    set
        Set of essential reactions
    """
    # Reactions carrying no flux in an optimal solution cannot be
    # essential, so only the active ones need to be knocked out.
    optimum = model.optimize(raise_error=True)
    feasibility_tol = model.solver.configuration.tolerances.feasibility
    active_reactions = [rxn_id for rxn_id, flux in optimum.fluxes.items()
                        if abs(flux) > feasibility_tol]
    knockouts = single_reaction_deletion(model,
                                         reaction_list=active_reactions,
                                         method='fba')
    # A knockout is lethal when its objective value is undefined (the
    # problem became infeasible) or falls below the viability threshold.
    lethal = knockouts.flux.isnull() | (knockouts.flux < threshold)
    return set(model.reactions.get_by_any(list(knockouts.index[lethal])))
diff --git a/cobra/io/__init__.py b/cobra/io/__init__.py
index 740d4fe..fac8edb 100644
--- a/cobra/io/__init__.py
+++ b/cobra/io/__init__.py
@@ -1,26 +1,14 @@
-from warnings import warn
-
-from .sbml3 import read_sbml_model, write_sbml_model
-from .json import load_json_model, save_json_model, to_json
-
-# These functions have other dependencies
-try:
-    import libsbml
-    from .sbml import read_legacy_sbml
-    from .sbml import write_cobra_model_to_sbml_file as write_legacy_sbml
-except ImportError:
-    warn("cobra.io.sbml requires libsbml")
-    libsbml = None
-    read_legacy_sbml = None
-    write_legacy_sbml = None
-
-try:
-    import scipy
-    from .mat import load_matlab_model, save_matlab_model
-except ImportError:
-    warn("cobra.io.mat requires scipy")
-    scipy = None
-    load_matlab_model = None
-    save_matlab_model = None
-
-del libsbml, scipy, warn
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from cobra.io.dict import (model_from_dict, model_to_dict)
+from cobra.io.json import (
+    to_json, from_json, load_json_model, save_json_model)
+from cobra.io.yaml import (
+    to_yaml, from_yaml, load_yaml_model, save_yaml_model)
+from cobra.io.sbml3 import read_sbml_model, write_sbml_model
+from cobra.io.sbml import read_legacy_sbml
+from cobra.io.sbml import write_cobra_model_to_sbml_file as \
+    write_legacy_sbml
+from cobra.io.mat import load_matlab_model, save_matlab_model
diff --git a/cobra/io/dict.py b/cobra/io/dict.py
new file mode 100644
index 0000000..8670252
--- /dev/null
+++ b/cobra/io/dict.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from collections import OrderedDict
+from operator import attrgetter, itemgetter
+
+from numpy import bool_, float_
+from six import iteritems, string_types
+
+from cobra.core import Gene, Metabolite, Model, Reaction
+from cobra.util.solver import set_objective
+
# Reaction attributes that are always serialized, in output order.
_REQUIRED_REACTION_ATTRIBUTES = [
    "id", "name", "metabolites", "lower_bound", "upper_bound",
    "gene_reaction_rule"]
# Optional reaction attributes are serialized only when they differ from
# the defaults below; the key list fixes their output order.
_ORDERED_OPTIONAL_REACTION_KEYS = [
    "objective_coefficient", "variable_kind", "subsystem", "notes",
    "annotation"]
_OPTIONAL_REACTION_ATTRIBUTES = {
    "objective_coefficient": 0,
    "variable_kind": "continuous",
    "subsystem": "",
    "notes": {},
    "annotation": {},
}

# Metabolite attributes: required ones are always serialized; optional
# ones only when different from the defaults below.
_REQUIRED_METABOLITE_ATTRIBUTES = ["id", "name", "compartment"]
_ORDERED_OPTIONAL_METABOLITE_KEYS = [
    "charge", "formula", "_bound", "_constraint_sense", "notes", "annotation"]
_OPTIONAL_METABOLITE_ATTRIBUTES = {
    "charge": None,
    "formula": None,
    "_bound": 0,
    "_constraint_sense": "E",
    "notes": {},
    "annotation": {},
}

# Gene attributes, same required/optional scheme as above.
_REQUIRED_GENE_ATTRIBUTES = ["id", "name"]
_ORDERED_OPTIONAL_GENE_KEYS = ["notes", "annotation"]
_OPTIONAL_GENE_ATTRIBUTES = {
    "notes": {},
    "annotation": {},
}

# Model-level optional attributes (the model id is always serialized).
_ORDERED_OPTIONAL_MODEL_KEYS = ["name", "compartments", "notes", "annotation"]
_OPTIONAL_MODEL_ATTRIBUTES = {
    "name": None,
    #  "description": None, should not actually be included
    "compartments": [],
    "notes": {},
    "annotation": {},
}
+
+
def _fix_type(value):
    """Coerce ``value`` to a plain JSON-serializable type.

    Converts strings to ``str``, numpy scalars to native ``float``/``bool``
    (numpy floats cannot be dumped to JSON), sets to lists, legacy
    ``Formula`` objects to strings, and ``None`` to the empty string.
    """
    if isinstance(value, string_types):
        return str(value)
    # numpy scalar types are not JSON serializable.
    if isinstance(value, float_):
        return float(value)
    if isinstance(value, bool_):
        return bool(value)
    if isinstance(value, set):
        return list(value)
    # Legacy Formula objects serialize as their string form.
    if value.__class__.__name__ == "Formula":
        return str(value)
    return "" if value is None else value
+
+
def _update_optional(cobra_object, new_dict, optional_attribute_dict,
                     ordered_keys):
    """Copy non-default optional attributes of ``cobra_object``.

    Attributes are visited in ``ordered_keys`` order; values that are
    ``None`` or equal to their default in ``optional_attribute_dict`` are
    omitted from ``new_dict``.
    """
    for attribute in ordered_keys:
        default = optional_attribute_dict[attribute]
        current = getattr(cobra_object, attribute)
        if current is not None and current != default:
            new_dict[attribute] = _fix_type(current)
+
+
def metabolite_to_dict(metabolite):
    """Represent ``metabolite`` as an ordered dict of its attributes."""
    serialized = OrderedDict(
        (key, _fix_type(getattr(metabolite, key)))
        for key in _REQUIRED_METABOLITE_ATTRIBUTES)
    _update_optional(metabolite, serialized, _OPTIONAL_METABOLITE_ATTRIBUTES,
                     _ORDERED_OPTIONAL_METABOLITE_KEYS)
    return serialized
+
+
def metabolite_from_dict(metabolite):
    """Build a cobra Metabolite from its dict representation."""
    result = Metabolite()
    for attribute, value in iteritems(metabolite):
        setattr(result, attribute, value)
    return result
+
+
def gene_to_dict(gene):
    """Represent ``gene`` as an ordered dict of its attributes."""
    serialized = OrderedDict(
        (key, _fix_type(getattr(gene, key)))
        for key in _REQUIRED_GENE_ATTRIBUTES)
    _update_optional(gene, serialized, _OPTIONAL_GENE_ATTRIBUTES,
                     _ORDERED_OPTIONAL_GENE_KEYS)
    return serialized
+
+
def gene_from_dict(gene):
    """Build a cobra Gene from its dict representation."""
    result = Gene(gene["id"])
    for attribute, value in iteritems(gene):
        setattr(result, attribute, value)
    return result
+
+
def reaction_to_dict(reaction):
    """Represent ``reaction`` as an ordered dict of its attributes.

    Metabolites are stored as a mapping from metabolite id to
    stoichiometric coefficient, sorted by id for reproducible output.
    """
    serialized = OrderedDict()
    for key in _REQUIRED_REACTION_ATTRIBUTES:
        if key == "metabolites":
            serialized["metabolites"] = OrderedDict(
                (str(met), reaction.metabolites[met])
                for met in sorted(reaction.metabolites, key=attrgetter("id")))
        else:
            serialized[key] = _fix_type(getattr(reaction, key))
    _update_optional(reaction, serialized, _OPTIONAL_REACTION_ATTRIBUTES,
                     _ORDERED_OPTIONAL_REACTION_KEYS)
    return serialized
+
+
def reaction_from_dict(reaction, model):
    """Build a cobra Reaction from its dict representation.

    ``model`` must already contain every metabolite the reaction
    references.  Derived keys ('objective_coefficient', 'reversibility',
    'reaction') are skipped because they are computed or set elsewhere.
    """
    result = Reaction()
    derived_keys = {'objective_coefficient', 'reversibility', 'reaction'}
    for attribute, value in iteritems(reaction):
        if attribute in derived_keys:
            continue
        if attribute == 'metabolites':
            stoichiometry = OrderedDict(
                (model.metabolites.get_by_id(str(met_id)), coefficient)
                for met_id, coefficient in iteritems(value))
            result.add_metabolites(stoichiometry)
        else:
            setattr(result, attribute, value)
    return result
+
+
def model_to_dict(model):
    """Convert model to a dict.

    Parameters
    ----------
    model : cobra.Model
        The model to reformulate as a dict

    Returns
    -------
    OrderedDict
        A dictionary with elements 'genes', 'compartments', 'id',
        'metabolites', 'notes' and 'reactions', where 'metabolites',
        'genes' and 'reactions' are in turn lists of dictionaries
        holding all attributes needed to recreate the corresponding
        object.

    See Also
    --------
    cobra.io.model_from_dict
    """
    by_id = itemgetter("id")
    obj = OrderedDict()
    # Sort each component list by id so serialized output is stable.
    obj["reactions"] = sorted(
        map(reaction_to_dict, model.reactions), key=by_id)
    obj["metabolites"] = sorted(
        map(metabolite_to_dict, model.metabolites), key=by_id)
    obj["genes"] = sorted(map(gene_to_dict, model.genes), key=by_id)
    obj["id"] = model.id
    _update_optional(model, obj, _OPTIONAL_MODEL_ATTRIBUTES,
                     _ORDERED_OPTIONAL_MODEL_KEYS)
    return obj
+
+
def model_from_dict(obj):
    """Build a model from a dict.

    Models stored in json are first formulated as a dict that can be read to
    cobra model using this function.

    Parameters
    ----------
    obj : dict
        A dictionary with elements, 'genes', 'compartments', 'id',
        'metabolites', 'notes' and 'reactions'; where 'metabolites', 'genes'
        and 'reactions' are in turn lists with dictionaries holding all
        attributes to form the corresponding object.

    Returns
    -------
    cobra.core.Model
        The generated model.

    Raises
    ------
    ValueError
        If ``obj`` has no 'reactions' key.

    See Also
    --------
    cobra.io.model_to_dict
    """
    if 'reactions' not in obj:
        raise ValueError('Object has no reactions attribute. Cannot load.')
    model = Model()
    # Metabolites and genes must exist before the reactions that
    # reference them are added.
    model.add_metabolites(
        [metabolite_from_dict(metabolite) for metabolite in obj['metabolites']]
    )
    model.genes.extend([gene_from_dict(gene) for gene in obj['genes']])
    model.add_reactions(
        [reaction_from_dict(reaction, model) for reaction in obj['reactions']]
    )
    # Objective coefficients are applied in a single pass through the
    # solver interface rather than per reaction.
    objective_reactions = [rxn for rxn in obj['reactions'] if
                           rxn.get('objective_coefficient', 0) != 0]
    coefficients = {
        model.reactions.get_by_id(rxn['id']): rxn['objective_coefficient'] for
        rxn in objective_reactions}
    set_objective(model, coefficients)
    # Copy over the whitelisted model-level attributes.
    for k, v in iteritems(obj):
        if k in {'id', 'name', 'notes', 'compartments', 'annotation'}:
            setattr(model, k, v)
    return model
diff --git a/cobra/io/json.py b/cobra/io/json.py
index 0cc8e5c..12ecee6 100644
--- a/cobra/io/json.py
+++ b/cobra/io/json.py
@@ -1,223 +1,134 @@
-from __future__ import absolute_import
+# -*- coding: utf-8 -*-
 
-import json
-from warnings import warn
+from __future__ import absolute_import
 
-from .. import Model, Metabolite, Reaction, Gene
-from six import iteritems, string_types
+import io
 
-# Detect numpy types to replace them.
 try:
-    from numpy import float_, bool_
+    import simplejson as json
 except ImportError:
-    class float_:
-        pass
-
-    class bool_:
-        pass
-
-_REQUIRED_REACTION_ATTRIBUTES = {"id", "name", "metabolites", "lower_bound",
-                                 "upper_bound", "gene_reaction_rule"}
-_OPTIONAL_REACTION_ATTRIBUTES = {
-    "objective_coefficient": 0,
-    "variable_kind": "continuous",
-    "subsystem": "",
-    "notes": {},
-    "annotation": {},
-}
+    import json
+from six import string_types
 
-_REQUIRED_METABOLITE_ATTRIBUTES = {"id", "name", "compartment"}
-_OPTIONAL_METABOLITE_ATTRIBUTES = {
-    "charge": None,
-    "formula": None,
-    "_bound": 0,
-    "_constraint_sense": "E",
-    "notes": {},
-    "annotation": {},
-}
+from cobra.io.dict import model_to_dict, model_from_dict
 
-_REQUIRED_GENE_ATTRIBUTES = {"id", "name"}
-_OPTIONAL_GENE_ATTRIBUTES = {
-    "notes": {},
-    "annotation": {},
-}
+JSON_SPEC = "1"
 
-_OPTIONAL_MODEL_ATTRIBUTES = {
-    "name": None,
-    #  "description": None, should not actually be included
-    "compartments": {},
-    "notes": {},
-    "annotation": {},
-}
 
+def to_json(model, **kwargs):
+    """
+    Return the model as a JSON document.
 
-def _fix_type(value):
-    """convert possible types to str, float, and bool"""
-    # Because numpy floats can not be pickled to json
-    if isinstance(value, string_types):
-        return str(value)
-    if isinstance(value, float_):
-        return float(value)
-    if isinstance(value, bool_):
-        return bool(value)
-    # handle legacy Formula type
-    if value.__class__.__name__ == "Formula":
-        return str(value)
-    if value is None:
-        return ''
-    return value
-
-
-def _from_dict(obj):
-    """build a model from a dict"""
-    if 'reactions' not in obj:
-        raise Exception('JSON object has no reactions attribute. Cannot load.')
-    model = Model()
-    model.add_metabolites(
-        [metabolite_from_dict(metabolite) for metabolite in obj['metabolites']]
-    )
-    model.genes.extend([gene_from_dict(gene) for gene in obj['genes']])
-    model.add_reactions(
-        [reaction_from_dict(reaction, model) for reaction in obj['reactions']]
-    )
-    for k, v in iteritems(obj):
-        if k in {'id', 'name', 'notes', 'compartments', 'annotation'}:
-            setattr(model, k, v)
-    return model
-
-
-def reaction_from_dict(reaction, model):
-    new_reaction = Reaction()
-    for k, v in iteritems(reaction):
-        if k == 'reversibility' or k == "reaction":
-            continue
-        elif k == 'metabolites':
-            new_reaction.add_metabolites(
-                {model.metabolites.get_by_id(str(met)): coeff
-                 for met, coeff in iteritems(v)})
-        else:
-            setattr(new_reaction, k, v)
-    return new_reaction
-
-
-def gene_from_dict(gene):
-    new_gene = Gene(gene["id"])
-    for k, v in iteritems(gene):
-        setattr(new_gene, k, v)
-    return new_gene
-
-
-def metabolite_from_dict(metabolite):
-    new_metabolite = Metabolite()
-    for k, v in iteritems(metabolite):
-        setattr(new_metabolite, k, v)
-    return new_metabolite
-
-
-def _update_optional(cobra_object, new_dict, optional_attribute_dict):
-    """update new_dict with optional attributes from cobra_object"""
-    for key, default_value in iteritems(optional_attribute_dict):
-        value = getattr(cobra_object, key)
-        if value is not None and value != default_value:
-            new_dict[key] = _fix_type(value)
-
-
-def _to_dict(model):
-    """convert the model to a dict"""
-    obj = dict(
-        reactions=[reaction_to_dict(reaction) for reaction in model.reactions],
-        metabolites=[
-            metabolite_to_dict(metabolite) for metabolite in model.metabolites
-            ],
-        genes=[gene_to_dict(gene) for gene in model.genes],
-        id=model.id,
-    )
-    _update_optional(model, obj, _OPTIONAL_MODEL_ATTRIBUTES)
-    # add in the JSON version
-    obj["version"] = 1
-    return obj
-
-
-def gene_to_dict(gene):
-    new_gene = {key: str(getattr(gene, key))
-                for key in _REQUIRED_GENE_ATTRIBUTES}
-    _update_optional(gene, new_gene, _OPTIONAL_GENE_ATTRIBUTES)
-    return new_gene
-
-
-def metabolite_to_dict(metabolite):
-    new_metabolite = {key: _fix_type(getattr(metabolite, key))
-                      for key in _REQUIRED_METABOLITE_ATTRIBUTES}
-    _update_optional(metabolite, new_metabolite,
-                     _OPTIONAL_METABOLITE_ATTRIBUTES)
-    return new_metabolite
-
-
-def reaction_to_dict(reaction):
-    new_reaction = {key: _fix_type(getattr(reaction, key))
-                    for key in _REQUIRED_REACTION_ATTRIBUTES
-                    if key != "metabolites"}
-    _update_optional(reaction, new_reaction, _OPTIONAL_REACTION_ATTRIBUTES)
-    # set metabolites
-    mets = {str(met): coeff for met, coeff
-            in iteritems(reaction._metabolites)}
-    new_reaction['metabolites'] = mets
-    return new_reaction
-
-
-def to_json(model):
-    """Save the cobra model as a json string"""
-    return json.dumps(_to_dict(model), allow_nan=False)
-
-
-def from_json(jsons):
-    """Load cobra model from a json string"""
-    return _from_dict(json.loads(jsons))
-
-
-def load_json_model(file_name):
-    """Load a cobra model stored as a json file
-
-    file_name : str or file-like object
+    ``kwargs`` are passed on to ``json.dumps``.
 
-    """
-    # open the file
-    should_close = False
-    if isinstance(file_name, string_types):
-        file_name = open(file_name, 'r')
-        should_close = True
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to represent.
 
-    model = _from_dict(json.load(file_name))
+    Returns
+    -------
+    str
+        String representation of the cobra model as a JSON document.
 
-    if should_close:
-        file_name.close()
+    See Also
+    --------
+    save_json_model : Write directly to a file.
+    json.dumps : Base function.
+    """
+    obj = model_to_dict(model)
+    obj[u"version"] = JSON_SPEC
+    return json.dumps(obj, allow_nan=False, **kwargs)
 
-    return model
 
+def from_json(document):
+    """
+    Load a cobra model from a JSON document.
+
+    Parameters
+    ----------
+    document : str
+        The JSON document representation of a cobra model.
 
-def save_json_model(model, file_name, pretty=False):
-    """Save the cobra model as a json file.
+    Returns
+    -------
+    cobra.Model
+        The cobra model as represented in the JSON document.
 
-    model : :class:`~cobra.core.Model.Model` object
+    See Also
+    --------
+    load_json_model : Load directly from a file.
+    """
+    return model_from_dict(json.loads(document))
 
-    file_name : str or file-like object
 
+def save_json_model(model, filename, pretty=False, **kwargs):
+    """
+    Write the cobra model to a file in JSON format.
+
+    ``kwargs`` are passed on to ``json.dump``.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to represent.
+    filename : str or file-like
+        File path or descriptor that the JSON representation should be
+        written to.
+    pretty : bool, optional
+        Whether to format the JSON more compactly (default) or in a more
+        verbose but easier to read fashion. Can be partially overwritten by the
+        ``kwargs``.
+
+    See Also
+    --------
+    to_json : Return a string representation.
+    json.dump : Base function.
     """
-    # open the file
-    should_close = False
-    if isinstance(file_name, string_types):
-        file_name = open(file_name, 'w')
-        should_close = True
+    obj = model_to_dict(model)
+    obj[u"version"] = JSON_SPEC
 
     if pretty:
-        dump_opts = {"indent": 4, "separators": (",", ": "), "sort_keys": True}
+        dump_opts = {
+            "indent": 4, "separators": (",", ": "), "sort_keys": True,
+            "allow_nan": False}
     else:
-        dump_opts = {}
+        dump_opts = {
+            "indent": 0, "separators": (",", ":"), "sort_keys": False,
+            "allow_nan": False}
+    dump_opts.update(**kwargs)
+
+    if isinstance(filename, string_types):
+        with open(filename, "w") as file_handle:
+            json.dump(obj, file_handle, **dump_opts)
+    else:
+        json.dump(obj, filename, **dump_opts)
 
-    json.dump(_to_dict(model), file_name, allow_nan=False, **dump_opts)
 
-    if should_close:
-        file_name.close()
def load_json_model(filename):
    """
    Load a cobra model from a file in JSON format.

    Parameters
    ----------
    filename : str or file-like
        File path or descriptor that contains the JSON document describing the
        cobra model.

    Returns
    -------
    cobra.Model
        The cobra model as represented in the JSON document.

    See Also
    --------
    from_json : Load from a string.
    """
    if not isinstance(filename, string_types):
        # Already an open file-like object; read it directly.
        return model_from_dict(json.load(filename))
    with open(filename, "r") as handle:
        return model_from_dict(json.load(handle))
 
 
 json_schema = {
diff --git a/cobra/io/mat.py b/cobra/io/mat.py
index 37a7e33..ddcb57b 100644
--- a/cobra/io/mat.py
+++ b/cobra/io/mat.py
@@ -1,27 +1,26 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 import re
+from collections import OrderedDict
 from uuid import uuid4
 from warnings import warn
 
-from numpy import isinf, inf, array, object as np_object
-from scipy.io import loadmat, savemat
-from scipy.sparse import coo_matrix, dok_matrix
+from numpy import object as np_object
+from numpy import array, inf, isinf
+from six import string_types
 
-from .. import Model, Metabolite, Reaction
+from cobra.core import Metabolite, Model, Reaction
+from cobra.util import create_stoichiometric_matrix
+from cobra.util.solver import set_objective
 
-# try to use an ordered dict
 try:
-    from scipy.version import short_version
-    scipy_version = int(short_version.split(".")[1])
-    # if scipy version is earlier than 0.11, OrderedDict will not work, so use
-    # a dict instead
-    if scipy_version < 11:
-        DictClass = dict
-    else:
-        from collections import OrderedDict as DictClass
-    del short_version, scipy_version
+    import scipy.sparse as scipy_sparse
+    import scipy.io as scipy_io
 except ImportError:
-    DictClass = dict
-    short_version = None
+    scipy_sparse = None
+    scipy_io = None
 
 
 # precompiled regular expressions
@@ -67,7 +66,10 @@ def load_matlab_model(infile_path, variable_name=None, inf=inf):
         The resulting cobra model
 
     """
-    data = loadmat(infile_path)
+    if not scipy_io:
+        raise ImportError('load_matlab_model requires scipy')
+
+    data = scipy_io.loadmat(infile_path)
     possible_names = []
     if variable_name is None:
         # skip meta variables
@@ -93,18 +95,25 @@ def save_matlab_model(model, file_name, varname=None):
 
     This .mat file can be used directly in the MATLAB version of COBRA.
 
-    model : :class:`~cobra.core.Model.Model` object
-
+    Parameters
+    ----------
+    model : cobra.core.Model.Model object
+        The model to save
     file_name : str or file-like object
-
+        The file to save to
+    varname : string
+       The name of the variable within the workspace
     """
+    if not scipy_io:
+        raise ImportError('load_matlab_model requires scipy')
+
     if varname is None:
         varname = str(model.id) \
             if model.id is not None and len(model.id) > 0 \
             else "exported_model"
     mat = create_mat_dict(model)
-    savemat(file_name, {varname: mat},
-            appendmat=True, oned_as="column")
+    scipy_io.savemat(file_name, {varname: mat},
+                     appendmat=True, oned_as="column")
 
 
 def create_mat_metabolite_id(model):
@@ -118,10 +127,9 @@ def create_mat_metabolite_id(model):
 
 def create_mat_dict(model):
     """create a dict mapping model attributes to arrays"""
-    model = model.to_array_based_model(deepcopy_model=True)
     rxns = model.reactions
     mets = model.metabolites
-    mat = DictClass()
+    mat = OrderedDict()
     mat["mets"] = _cell([met_id for met_id in create_mat_metabolite_id(model)])
     mat["metNames"] = _cell(mets.list_attr("name"))
     mat["metFormulas"] = _cell([str(m.formula) for m in mets])
@@ -133,18 +141,21 @@ def create_mat_dict(model):
     mat["genes"] = _cell(model.genes.list_attr("id"))
     # make a matrix for rxnGeneMat
     # reactions are rows, genes are columns
-    rxnGene = dok_matrix((len(model.reactions), len(model.genes)))
-    if min(rxnGene.shape) > 0:
+    rxn_gene = scipy_sparse.dok_matrix((len(model.reactions),
+                                        len(model.genes)))
+    if min(rxn_gene.shape) > 0:
         for i, reaction in enumerate(model.reactions):
             for gene in reaction.genes:
-                rxnGene[i, model.genes.index(gene)] = 1
-        mat["rxnGeneMat"] = rxnGene
+                rxn_gene[i, model.genes.index(gene)] = 1
+        mat["rxnGeneMat"] = rxn_gene
     mat["grRules"] = _cell(rxns.list_attr("gene_reaction_rule"))
     mat["rxns"] = _cell(rxns.list_attr("id"))
     mat["rxnNames"] = _cell(rxns.list_attr("name"))
     mat["subSystems"] = _cell(rxns.list_attr("subsystem"))
-    mat["csense"] = "".join(model._constraint_sense)
-    mat["S"] = model.S if model.S is not None else [[]]
+    mat["csense"] = "".join((
+        met._constraint_sense for met in model.metabolites))
+    stoich_mat = create_stoichiometric_matrix(model)
+    mat["S"] = stoich_mat if stoich_mat is not None else [[]]
     # multiply by 1 to convert to float, working around scipy bug
     # https://github.com/scipy/scipy/issues/4537
     mat["lb"] = array(rxns.list_attr("lower_bound")) * 1.
@@ -173,15 +184,15 @@ def from_mat_struct(mat_struct, model_id=None, inf=inf):
         c_vec = None
         warn("objective vector 'c' not found")
     model = Model()
-    if "description" in m.dtype.names:
+    if model_id is not None:
+        model.id = model_id
+    elif "description" in m.dtype.names:
         description = m["description"][0, 0][0]
-        if len(description) > 1:
+        if not isinstance(description, string_types) and len(description) > 1:
             model.id = description[0]
             warn("Several IDs detected, only using the first.")
         else:
             model.id = description
-    elif model_id is not None:
-        model.id = model_id
     else:
         model.id = "imported_model"
     for i, name in enumerate(m["mets"][0, 0]):
@@ -192,7 +203,6 @@ def from_mat_struct(mat_struct, model_id=None, inf=inf):
             comp_index = m["metComps"][0, 0][i][0] - 1
             new_metabolite.compartment = m['comps'][0, 0][comp_index][0][0]
             if new_metabolite.compartment not in model.compartments:
-
                 comp_name = m['compNames'][0, 0][comp_index][0][0]
                 model.compartments[new_metabolite.compartment] = comp_name
         else:
@@ -217,6 +227,7 @@ def from_mat_struct(mat_struct, model_id=None, inf=inf):
             pass
         model.add_metabolites([new_metabolite])
     new_reactions = []
+    coefficients = {}
     for i, name in enumerate(m["rxns"][0, 0]):
         new_reaction = Reaction()
         new_reaction.id = str(name[0][0])
@@ -227,7 +238,7 @@ def from_mat_struct(mat_struct, model_id=None, inf=inf):
         if isinf(new_reaction.upper_bound) and new_reaction.upper_bound > 0:
             new_reaction.upper_bound = inf
         if c_vec is not None:
-            new_reaction.objective_coefficient = float(c_vec[i][0])
+            coefficients[new_reaction] = float(c_vec[i][0])
         try:
             new_reaction.gene_reaction_rule = str(m['grRules'][0, 0][i][0][0])
         except (IndexError, ValueError):
@@ -242,7 +253,8 @@ def from_mat_struct(mat_struct, model_id=None, inf=inf):
             pass
         new_reactions.append(new_reaction)
     model.add_reactions(new_reactions)
-    coo = coo_matrix(m["S"][0, 0])
+    set_objective(model, coefficients)
+    coo = scipy_sparse.coo_matrix(m["S"][0, 0])
     for i, j, v in zip(coo.row, coo.col, coo.data):
         model.reactions[j].add_metabolites({model.metabolites[i]: v})
     return model
@@ -259,16 +271,20 @@ def model_to_pymatbridge(model, variable_name="model", matlab=None):
 
     This model can then be manipulated through the COBRA toolbox
 
-    variable_name: str
+    Parameters
+    ----------
+    variable_name : str
         The variable name to which the model will be assigned in the
         MATLAB workspace
 
-    matlab: None or pymatbridge.Matlab instance
+    matlab : None or pymatbridge.Matlab instance
         The MATLAB workspace to which the variable will be sent. If
         this is None, then this will be sent to the same environment
         used in IPython magics.
 
     """
+    if scipy_sparse is None:
+        raise ImportError("`model_to_pymatbridge` requires scipy!")
     if matlab is None:  # assumed to be running an IPython magic
         from IPython import get_ipython
         matlab = get_ipython().magics_manager.registry["MatlabMagics"].Matlab
diff --git a/cobra/io/sbml.py b/cobra/io/sbml.py
index a3bdf7c..cc4d65f 100644
--- a/cobra/io/sbml.py
+++ b/cobra/io/sbml.py
@@ -1,94 +1,107 @@
-#cobra/sbml.py: Tools for reading / writing SBML now contained in
-#this module
-#System modules
-from .. import Model, Reaction, Metabolite
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import re
+from math import isinf, isnan
 from os.path import isfile
-from os import name as __name
 from warnings import warn
-import re
-from math import isnan, isinf
 
 from six import iteritems
 
-#
-if __name == 'java':
-    from org.sbml.jsbml import SBMLDocument, SpeciesReference, KineticLaw, Parameter
-    from org.sbml.jsbml import SBMLReader, SBMLWriter
-    __tmp_reader = SBMLReader()
-    __tmp_writer = SBMLWriter()
-    readSBML = __tmp_reader.readSBMLFromFile
-    writeSBML = __tmp_writer.writeSBMLToFile
-    from org.sbml.jsbml.Unit import Kind as __Kind
-    UNIT_KIND_MOLE = __Kind.MOLE
-    UNIT_KIND_GRAM = __Kind.GRAM
-    UNIT_KIND_SECOND = __Kind.SECOND
-    UNIT_KIND_DIMENSIONLESS = __Kind.DIMENSIONLESS
-else:
-    from libsbml import SBMLDocument, SpeciesReference, KineticLaw, Parameter
-    from libsbml import readSBML, writeSBML
-    from libsbml import UNIT_KIND_MOLE, UNIT_KIND_GRAM, UNIT_KIND_SECOND, UNIT_KIND_DIMENSIONLESS
-def parse_legacy_id(the_id, the_compartment=None, the_type='metabolite',
-                    use_hyphens=False):
-    """Deals with a bunch of problems due to bigg.ucsd.edu not following SBML standards
-
-    the_id: String.
+from cobra.core import Metabolite, Model, Reaction
+from cobra.util.solver import set_objective
 
-    the_compartment: String.
+try:
+    import libsbml
+except ImportError:
+    libsbml = None
 
-    the_type: String.  Currently only 'metabolite' is supported
 
-    use_hyphens:   Boolean.  If True, double underscores (__) in an SBML ID will be converted to hyphens
+def parse_legacy_id(the_id, the_compartment=None, the_type='metabolite',
+                    use_hyphens=False):
+    """Deals with a bunch of problems due to bigg.ucsd.edu not following SBML
+    standards
 
+    Parameters
+    ----------
+    the_id: String.
+    the_compartment: String
+    the_type: String
+        Currently only 'metabolite' is supported
+    use_hyphens: Boolean
+        If True, double underscores (__) in an SBML ID will be converted to
+        hyphens
+
+    Returns
+    -------
+    string: the identifier
     """
     if use_hyphens:
-        the_id = the_id.replace('__','-')
+        the_id = the_id.replace('__', '-')
     if the_type == 'metabolite':
         if the_id.split('_')[-1] == the_compartment:
-            #Reformat Ids to match convention in Palsson Lab.
-            the_id = the_id[:-len(the_compartment)-1]
-        the_id += '[%s]'%the_compartment
+            # Reformat Ids to match convention in Palsson Lab.
+            the_id = the_id[:-len(the_compartment) - 1]
+        the_id += '[%s]' % the_compartment
     return the_id
-def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_metabolite=False,
-                                      print_time=False, use_hyphens=False):
-    """convert an SBML XML file into a cobra.Model object.  Supports
-    SBML Level 2 Versions 1 and 4.  The function will detect if the SBML fbc package is used in the file
-    and run the converter if the fbc package is used.
-
-    sbml_filename: String.
-
-    old_sbml:  Boolean. Set to True if the XML file has metabolite
-    formula appended to metabolite names.  This was a poorly designed
-    artifact that persists in some models.
-
-    legacy_metabolite: Boolean.  If True then assume that the metabolite id has
-    the compartment id appended after an underscore (e.g. _c for cytosol).  This
-    has not been implemented but will be soon.
 
-    print_time: deprecated
-
-    use_hyphens:   Boolean.  If True, double underscores (__) in an SBML ID will be converted to hyphens
 
+def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False,
+                                      legacy_metabolite=False,
+                                      print_time=False, use_hyphens=False):
+    """convert an SBML XML file into a cobra.Model object.
+
+    Supports SBML Level 2 Versions 1 and 4.  The function will detect if the
+    SBML fbc package is used in the file and run the converter if the fbc
+    package is used.
+
+    Parameters
+    ----------
+    sbml_filename: string
+    old_sbml: bool
+        Set to True if the XML file has metabolite formula appended to
+        metabolite names. This was a poorly designed artifact that persists in
+        some models.
+    legacy_metabolite: bool
+        If True then assume that the metabolite id has the compartment id
+         appended after an underscore (e.g. _c for cytosol). This has not been
+         implemented but will be soon.
+    print_time: bool
+         deprecated
+    use_hyphens: bool
+        If True, double underscores (__) in an SBML ID will be converted to
+        hyphens
+
+    Returns
+    -------
+    Model : The parsed cobra model
     """
+    if not libsbml:
+        raise ImportError('create_cobra_model_from_sbml_file '
+                          'requires python-libsbml')
+
     __default_lower_bound = -1000
     __default_upper_bound = 1000
     __default_objective_coefficient = 0
-     # Ensure that the file exists
+    # Ensure that the file exists
     if not isfile(sbml_filename):
-        raise IOError('Your SBML file is not found: %s'%sbml_filename)
-    #Expressions to change SBML Ids to Palsson Lab Ids
+        raise IOError('Your SBML file is not found: %s' % sbml_filename)
+    # Expressions to change SBML Ids to Palsson Lab Ids
     metabolite_re = re.compile('^M_')
     reaction_re = re.compile('^R_')
     compartment_re = re.compile('^C_')
     if print_time:
-        warn("print_time is deprecated")
-    model_doc = readSBML(sbml_filename)
-    if (model_doc.getPlugin("fbc") != None):
+        warn("print_time is deprecated", DeprecationWarning)
+    model_doc = libsbml.readSBML(sbml_filename)
+    if model_doc.getPlugin("fbc") is not None:
         from libsbml import ConversionProperties, LIBSBML_OPERATION_SUCCESS
         conversion_properties = ConversionProperties()
-        conversion_properties.addOption("convert fbc to cobra", True, "Convert FBC model to Cobra model")
+        conversion_properties.addOption(
+            "convert fbc to cobra", True, "Convert FBC model to Cobra model")
         result = model_doc.convert(conversion_properties)
         if result != LIBSBML_OPERATION_SUCCESS:
-            raise(Exception("Conversion of SBML+fbc to COBRA failed"))
+            raise Exception("Conversion of SBML+fbc to COBRA failed")
     sbml_model = model_doc.getModel()
     sbml_model_id = sbml_model.getId()
     sbml_species = sbml_model.getListOfSpecies()
@@ -97,7 +110,8 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
     compartment_dict = dict([(compartment_re.split(x.getId())[-1], x.getName())
                              for x in sbml_compartments])
     if legacy_metabolite:
-        #Deal with the palsson lab appending the compartment id to the metabolite id
+        # Deal with the palsson lab appending the compartment id to the
+        # metabolite id
         new_dict = {}
         for the_id, the_name in compartment_dict.items():
             if the_name == '':
@@ -105,38 +119,44 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
             else:
                 new_dict[the_id] = the_name
         compartment_dict = new_dict
-        legacy_compartment_converter = dict([(v,k)
-                                             for k, v in compartment_dict.items()])
+        legacy_compartment_converter = dict(
+            [(v, k) for k, v in iteritems(compartment_dict)])
 
     cobra_model = Model(sbml_model_id)
     metabolites = []
     metabolite_dict = {}
-    #Convert sbml_metabolites to cobra.Metabolites
+    # Convert sbml_metabolites to cobra.Metabolites
     for sbml_metabolite in sbml_species:
-        #Skip sbml boundary species
+        # Skip sbml boundary species
         if sbml_metabolite.getBoundaryCondition():
             continue
 
         if (old_sbml or legacy_metabolite) and \
-               sbml_metabolite.getId().endswith('_b'):
-            #Deal with incorrect sbml from bigg.ucsd.edu
+                sbml_metabolite.getId().endswith('_b'):
+            # Deal with incorrect sbml from bigg.ucsd.edu
             continue
         tmp_metabolite = Metabolite()
         metabolite_id = tmp_metabolite.id = sbml_metabolite.getId()
-        tmp_metabolite.compartment = compartment_re.split(sbml_metabolite.getCompartment())[-1]
+        tmp_metabolite.compartment = compartment_re.split(
+            sbml_metabolite.getCompartment())[-1]
         if legacy_metabolite:
             if tmp_metabolite.compartment not in compartment_dict:
-                tmp_metabolite.compartment = legacy_compartment_converter[tmp_metabolite.compartment]
-            tmp_metabolite.id = parse_legacy_id(tmp_metabolite.id, tmp_metabolite.compartment,
-                                                use_hyphens=use_hyphens)
+                tmp_metabolite.compartment = legacy_compartment_converter[
+                    tmp_metabolite.compartment]
+            tmp_metabolite.id = parse_legacy_id(
+                tmp_metabolite.id, tmp_metabolite.compartment,
+                use_hyphens=use_hyphens)
         if use_hyphens:
-            tmp_metabolite.id = metabolite_re.split(tmp_metabolite.id)[-1].replace('__','-')
+            tmp_metabolite.id = metabolite_re.split(
+                tmp_metabolite.id)[-1].replace('__', '-')
         else:
-            #Just in case the SBML ids are ill-formed and use -
-            tmp_metabolite.id = metabolite_re.split(tmp_metabolite.id)[-1].replace('-','__')
+            # Just in case the SBML ids are ill-formed and use -
+            tmp_metabolite.id = metabolite_re.split(
+                tmp_metabolite.id)[-1].replace('-', '__')
         tmp_metabolite.name = sbml_metabolite.getName()
         tmp_formula = ''
-        tmp_metabolite.notes = parse_legacy_sbml_notes(sbml_metabolite.getNotesString())
+        tmp_metabolite.notes = parse_legacy_sbml_notes(
+            sbml_metabolite.getNotesString())
         if sbml_metabolite.isSetCharge():
             tmp_metabolite.charge = sbml_metabolite.getCharge()
         if "CHARGE" in tmp_metabolite.notes:
@@ -146,16 +166,19 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
                 if note_charge == int(note_charge):
                     note_charge = int(note_charge)
             except:
-                warn("charge of %s is not a number (%s)" % (tmp_metabolite.id, str(note_charge)))
+                warn("charge of %s is not a number (%s)" %
+                     (tmp_metabolite.id, str(note_charge)))
             else:
-                if tmp_metabolite.charge is None or tmp_metabolite.charge == note_charge:
+                if ((tmp_metabolite.charge is None) or
+                        (tmp_metabolite.charge == note_charge)):
                     tmp_metabolite.notes.pop("CHARGE")
                     # set charge to the one from notes if not assigend before
                     # the same
                     tmp_metabolite.charge = note_charge
                 else:  # tmp_metabolite.charge != note_charge
                     msg = "different charges specified for %s (%d and %d)"
-                    msg = msg % (tmp_metabolite.id, tmp_metabolite.charge, note_charge)
+                    msg = msg % (tmp_metabolite.id,
+                                 tmp_metabolite.charge, note_charge)
                     warn(msg)
                     # Chances are a 0 note charge was written by mistake. We
                     # will default to the note_charge in this case.
@@ -168,37 +191,41 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
                 break
         if tmp_formula == '' and old_sbml:
             tmp_formula = tmp_metabolite.name.split('_')[-1]
-            tmp_metabolite.name = tmp_metabolite.name[:-len(tmp_formula)-1]
+            tmp_metabolite.name = tmp_metabolite.name[:-len(tmp_formula) - 1]
         tmp_metabolite.formula = tmp_formula
         metabolite_dict.update({metabolite_id: tmp_metabolite})
         metabolites.append(tmp_metabolite)
     cobra_model.add_metabolites(metabolites)
 
-    #Construct the vectors and matrices for holding connectivity and numerical info
-    #to feed to the cobra toolbox.
-    #Always assume steady state simulations so b is set to 0
+    # Construct the vectors and matrices for holding connectivity and numerical
+    # info to feed to the cobra toolbox.
+    # Always assume steady state simulations so b is set to 0
     cobra_reaction_list = []
+    coefficients = {}
     for sbml_reaction in sbml_reactions:
         if use_hyphens:
-            #Change the ids to match conventions used by the Palsson lab.
-            reaction = Reaction(reaction_re.split(sbml_reaction.getId())[-1].replace('__','-'))
+            # Change the ids to match conventions used by the Palsson lab.
+            reaction = Reaction(reaction_re.split(
+                sbml_reaction.getId())[-1].replace('__', '-'))
         else:
-            #Just in case the SBML ids are ill-formed and use -
-            reaction = Reaction(reaction_re.split(sbml_reaction.getId())[-1].replace('-','__'))
+            # Just in case the SBML ids are ill-formed and use -
+            reaction = Reaction(reaction_re.split(
+                sbml_reaction.getId())[-1].replace('-', '__'))
         cobra_reaction_list.append(reaction)
-        #reaction.exchange_reaction = 0
+        # reaction.exchange_reaction = 0
         reaction.name = sbml_reaction.getName()
         cobra_metabolites = {}
-        #Use the cobra.Metabolite class here
+        # Use the cobra.Metabolite class here
         for sbml_metabolite in sbml_reaction.getListOfReactants():
             tmp_metabolite_id = sbml_metabolite.getSpecies()
-            #This deals with boundary metabolites
+            # This deals with boundary metabolites
             if tmp_metabolite_id in metabolite_dict:
                 tmp_metabolite = metabolite_dict[tmp_metabolite_id]
-                cobra_metabolites[tmp_metabolite] = -sbml_metabolite.getStoichiometry()
+                cobra_metabolites[tmp_metabolite] = - \
+                    sbml_metabolite.getStoichiometry()
         for sbml_metabolite in sbml_reaction.getListOfProducts():
             tmp_metabolite_id = sbml_metabolite.getSpecies()
-            #This deals with boundary metabolites
+            # This deals with boundary metabolites
             if tmp_metabolite_id in metabolite_dict:
                 tmp_metabolite = metabolite_dict[tmp_metabolite_id]
                 # Handle the case where the metabolite was specified both
@@ -206,37 +233,43 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
                 if tmp_metabolite in cobra_metabolites:
                     warn("%s appears as a reactant and product %s" %
                          (tmp_metabolite_id, reaction.id))
-                    cobra_metabolites[tmp_metabolite] += sbml_metabolite.getStoichiometry()
+                    cobra_metabolites[
+                        tmp_metabolite] += sbml_metabolite.getStoichiometry()
                     # if the combined stoichiometry is 0, remove the metabolite
                     if cobra_metabolites[tmp_metabolite] == 0:
                         cobra_metabolites.pop(tmp_metabolite)
                 else:
-                    cobra_metabolites[tmp_metabolite] = sbml_metabolite.getStoichiometry()
+                    cobra_metabolites[
+                        tmp_metabolite] = sbml_metabolite.getStoichiometry()
         # check for nan
         for met, v in iteritems(cobra_metabolites):
             if isnan(v) or isinf(v):
                 warn("invalid value %s for metabolite '%s' in reaction '%s'" %
                      (str(v), met.id, reaction.id))
         reaction.add_metabolites(cobra_metabolites)
-        #Parse the kinetic law info here.
+        # Parse the kinetic law info here.
         parameter_dict = {}
-        #If lower and upper bounds are specified in the Kinetic Law then
-        #they override the sbml reversible attribute.  If they are not
-        #specified then the bounds are determined by getReversible.
+        # If lower and upper bounds are specified in the Kinetic Law then
+        # they override the sbml reversible attribute.  If they are not
+        # specified then the bounds are determined by getReversible.
         if not sbml_reaction.getKineticLaw():
 
             if sbml_reaction.getReversible():
                 parameter_dict['lower_bound'] = __default_lower_bound
                 parameter_dict['upper_bound'] = __default_upper_bound
             else:
-                #Assume that irreversible reactions only proceed from left to right.
+                # Assume that irreversible reactions only proceed from left to
+                # right.
                 parameter_dict['lower_bound'] = 0
                 parameter_dict['upper_bound'] = __default_upper_bound
 
-            parameter_dict['objective_coefficient'] = __default_objective_coefficient
+            parameter_dict[
+                'objective_coefficient'] = __default_objective_coefficient
         else:
-            for sbml_parameter in sbml_reaction.getKineticLaw().getListOfParameters():
-                parameter_dict[sbml_parameter.getId().lower()] = sbml_parameter.getValue()
+            for sbml_parameter in \
+                    sbml_reaction.getKineticLaw().getListOfParameters():
+                parameter_dict[
+                    sbml_parameter.getId().lower()] = sbml_parameter.getValue()
 
         if 'lower_bound' in parameter_dict:
             reaction.lower_bound = parameter_dict['lower_bound']
@@ -254,12 +287,11 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
         else:
             reaction.upper_bound = __default_upper_bound
 
-        if 'objective_coefficient' in parameter_dict:
-            reaction.objective_coefficient = parameter_dict['objective_coefficient']
-        elif 'objective coefficient' in parameter_dict:
-            reaction.objective_coefficient = parameter_dict['objective coefficient']
-        else:
-            reaction.objective_coefficient = __default_objective_coefficient
+        objective_coefficient = parameter_dict.get(
+            'objective_coefficient', parameter_dict.get(
+                'objective coefficient', __default_objective_coefficient))
+        if objective_coefficient != 0:
+            coefficients[reaction] = objective_coefficient
 
         # ensure values are not set to nan or inf
         if isnan(reaction.lower_bound) or isinf(reaction.lower_bound):
@@ -267,12 +299,13 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
         if isnan(reaction.upper_bound) or isinf(reaction.upper_bound):
             reaction.upper_bound = __default_upper_bound
 
-        reaction_note_dict = parse_legacy_sbml_notes(sbml_reaction.getNotesString())
-        #Parse the reaction notes.
-        #POTENTIAL BUG: DEALING WITH LEGACY 'SBML' THAT IS NOT IN A
-        #STANDARD FORMAT
-        #TODO: READ IN OTHER NOTES AND GIVE THEM A reaction_ prefix.
-        #TODO: Make sure genes get added as objects
+        reaction_note_dict = parse_legacy_sbml_notes(
+            sbml_reaction.getNotesString())
+        # Parse the reaction notes.
+        # POTENTIAL BUG: DEALING WITH LEGACY 'SBML' THAT IS NOT IN A
+        # STANDARD FORMAT
+        # TODO: READ IN OTHER NOTES AND GIVE THEM A reaction_ prefix.
+        # TODO: Make sure genes get added as objects
         if 'GENE ASSOCIATION' in reaction_note_dict:
             rule = reaction_note_dict['GENE ASSOCIATION'][0]
             try:
@@ -284,8 +317,8 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
             reaction.gene_reaction_rule = rule
             if 'GENE LIST' in reaction_note_dict:
                 reaction.systematic_names = reaction_note_dict['GENE LIST'][0]
-            elif 'GENES' in reaction_note_dict and \
-                     reaction_note_dict['GENES'] != ['']:
+            elif ('GENES' in reaction_note_dict and
+                  reaction_note_dict['GENES'] != ['']):
                 reaction.systematic_names = reaction_note_dict['GENES'][0]
             elif 'LOCUS' in reaction_note_dict:
                 gene_id_to_object = dict([(x.id, x) for x in reaction._genes])
@@ -297,32 +330,31 @@ def create_cobra_model_from_sbml_file(sbml_filename, old_sbml=False, legacy_meta
                         tmp_row_dict[k] = v
                     tmp_locus_id = tmp_row_dict['LOCUS']
                     if 'TRANSCRIPT' in tmp_row_dict:
-                        tmp_locus_id = tmp_locus_id + '.' + tmp_row_dict['TRANSCRIPT']
+                        tmp_locus_id = tmp_locus_id + \
+                                       '.' + tmp_row_dict['TRANSCRIPT']
 
                     if 'ABBREVIATION' in tmp_row_dict:
-                        gene_id_to_object[tmp_locus_id].name = tmp_row_dict['ABBREVIATION']
+                        gene_id_to_object[tmp_locus_id].name = tmp_row_dict[
+                            'ABBREVIATION']
 
         if 'SUBSYSTEM' in reaction_note_dict:
             reaction.subsystem = reaction_note_dict.pop('SUBSYSTEM')[0]
 
         reaction.notes = reaction_note_dict
 
-
-    #Now, add all of the reactions to the model.
+    # Now, add all of the reactions to the model.
     cobra_model.id = sbml_model.getId()
-    #Populate the compartment list - This will be done based on cobra.Metabolites
-    #in cobra.Reactions in the future.
+    # Populate the compartment list - This will be done based on
+    # cobra.Metabolites in cobra.Reactions in the future.
     cobra_model.compartments = compartment_dict
 
     cobra_model.add_reactions(cobra_reaction_list)
-        #cobra_model.update_rules()
+    set_objective(cobra_model, coefficients)
     return cobra_model
 
 
-def parse_legacy_sbml_notes(note_string, note_delimiter = ':'):
-    """Deal with legacy SBML format issues arising from the
-    COBRA Toolbox for MATLAB and BiGG.ucsd.edu developers.
-
+def parse_legacy_sbml_notes(note_string, note_delimiter=':'):
+    """Deal with various legacy SBML format issues.
     """
     note_dict = {}
     start_tag = '<p>'
@@ -333,21 +365,32 @@ def parse_legacy_sbml_notes(note_string, note_delimiter = ':'):
     while start_tag in note_string and end_tag in note_string:
         note_start = note_string.index(start_tag)
         note_end = note_string.index(end_tag)
-        the_note = note_string[(note_start + len(start_tag)):note_end].lstrip(' ').rstrip(' ')
+        the_note = note_string[
+                   (note_start + len(start_tag)):note_end].lstrip(' ').rstrip(
+            ' ')
         if note_delimiter in the_note:
             note_delimiter_index = the_note.index(note_delimiter)
-            note_field = the_note[:note_delimiter_index].lstrip(' ').rstrip(' ').replace('_',' ').upper()
-            note_value = the_note[(note_delimiter_index+1):].lstrip(' ').rstrip(' ')
+            note_field = the_note[:note_delimiter_index].lstrip(
+                ' ').rstrip(' ').replace('_', ' ').upper()
+            note_value = the_note[
+                         (note_delimiter_index + 1):].lstrip(' ').rstrip(' ')
             if note_field in note_dict:
                 note_dict[note_field].append(note_value)
             else:
                 note_dict[note_field] = [note_value]
-        note_string = note_string[(note_end+len(end_tag)): ]
+        note_string = note_string[(note_end + len(end_tag)):]
+
+    if ('CHARGE' in note_dict and
+            note_dict['CHARGE'][0].lower() in ['none', 'na', 'nan']):
+        note_dict.pop('CHARGE')  # Remove non-numeric charges
 
-    if 'CHARGE' in note_dict and note_dict['CHARGE'][0].lower() in ['none', 'na', 'nan']:
-        note_dict.pop('CHARGE') #Remove non-numeric charges
+    if 'CHARGE' in note_dict and note_dict['CHARGE'][0].lower() in ['none',
+                                                                    'na',
+                                                                    'nan']:
+        note_dict.pop('CHARGE')  # Remove non-numeric charges
+
+    return note_dict
 
-    return(note_dict)
 
 def write_cobra_model_to_sbml_file(cobra_model, sbml_filename,
                                    sbml_level=2, sbml_version=1,
@@ -355,63 +398,68 @@ def write_cobra_model_to_sbml_file(cobra_model, sbml_filename,
                                    use_fbc_package=True):
     """Write a cobra.Model object to an SBML XML file.
 
-    cobra_model:  :class:`~cobra.core.Model.Model` object
-
-    sbml_filename:  The file to write the SBML XML to.
-
-    sbml_level:  2 is the only level supported at the moment.
-
-    sbml_version: 1 is the only version supported at the moment.
-
-    use_fbc_package: Boolean.
+    Parameters
+    ----------
+    cobra_model : cobra.core.Model.Model
+        The model object to write
+    sbml_filename : string
+        The file to write the SBML XML to.
+    sbml_level : int
+        2 is the only supported level.
+    sbml_version : int
+        1 is the only supported version.
+    print_time : bool
+        deprecated
+    use_fbc_package : bool
         Convert the model to the FBC package format to improve portability.
         http://sbml.org/Documents/Specifications/SBML_Level_3/Packages/Flux_Balance_Constraints_(flux)
 
-
+    Notes
+    -----
     TODO: Update the NOTES to match the SBML standard and provide support for
     Level 2 Version 4
-
     """
-
+    if not libsbml:
+        raise ImportError('write_cobra_model_to_sbml_file '
+                          'requires python-libsbml')
     sbml_doc = get_libsbml_document(cobra_model,
-                                   sbml_level=sbml_level, sbml_version=sbml_version,
-                                   print_time=print_time,
-                                   use_fbc_package=use_fbc_package)
+                                    sbml_level=sbml_level,
+                                    sbml_version=sbml_version,
+                                    print_time=print_time,
+                                    use_fbc_package=use_fbc_package)
 
-    writeSBML(sbml_doc, sbml_filename)
+    libsbml.writeSBML(sbml_doc, sbml_filename)
 
-def get_libsbml_document(cobra_model,
-                                   sbml_level=2, sbml_version=1,
-                                   print_time=False,
-                                   use_fbc_package=True):
 
+def get_libsbml_document(cobra_model,
+                         sbml_level=2, sbml_version=1,
+                         print_time=False,
+                         use_fbc_package=True):
     """ Return a libsbml document object for writing to a file. This function
     is used by write_cobra_model_to_sbml_file(). """
-
     note_start_tag, note_end_tag = '<p>', '</p>'
     if sbml_level > 2 or (sbml_level == 2 and sbml_version == 4):
         note_start_tag, note_end_tag = '<html:p>', '</html:p>'
 
-
-    sbml_doc = SBMLDocument(sbml_level, sbml_version)
+    sbml_doc = libsbml.SBMLDocument(sbml_level, sbml_version)
     sbml_model = sbml_doc.createModel(cobra_model.id.split('.')[0])
-    #Note need to set units
+    # Note need to set units
     reaction_units = 'mmol_per_gDW_per_hr'
     model_units = sbml_model.createUnitDefinition()
     model_units.setId(reaction_units)
     sbml_unit = model_units.createUnit()
-    sbml_unit.setKind(UNIT_KIND_MOLE)
+    sbml_unit.setKind(libsbml.UNIT_KIND_MOLE)
     sbml_unit.setScale(-3)
     sbml_unit = model_units.createUnit()
-    sbml_unit.setKind(UNIT_KIND_GRAM)
+    sbml_unit.setKind(libsbml.UNIT_KIND_GRAM)
     sbml_unit.setExponent(-1)
     sbml_unit = model_units.createUnit()
-    sbml_unit.setKind(UNIT_KIND_SECOND)
-    sbml_unit.setMultiplier(1.0/60/60)
+    sbml_unit.setKind(libsbml.UNIT_KIND_SECOND)
+    sbml_unit.setMultiplier(1.0 / 60 / 60)
     sbml_unit.setExponent(-1)
 
-    #Add in the common compartment abbreviations.  If there are additional compartments
-    #they also need to be added.
+    # Add in the common compartment abbreviations.  If there are additional
+    # compartments they also need to be added.
     if not cobra_model.compartments:
         cobra_model.compartments = {'c': 'cytosol',
                                     'p': 'periplasm',
@@ -420,26 +468,25 @@ def get_libsbml_document(cobra_model,
         sbml_comp = sbml_model.createCompartment()
         sbml_comp.setId(the_key)
         sbml_comp.setName(cobra_model.compartments[the_key])
-        sbml_comp.setSize(1) #Just to get rid of warnings
+        sbml_comp.setSize(1)  # Just to get rid of warnings
 
     if print_time:
-        warn("print_time is deprecated")
-    #Use this dict to allow for fast look up of species id
-    #for references created in the reaction section.
+        warn("print_time is deprecated", DeprecationWarning)
+    # Use this dict to allow for fast look up of species id
+    # for references created in the reaction section.
     metabolite_dict = {}
 
     for cobra_metabolite in cobra_model.metabolites:
-        metabolite_dict[cobra_metabolite.id] =  add_sbml_species(sbml_model,
-                                                                 cobra_metabolite,
-                                                                 note_start_tag=note_start_tag,
-                                                                 note_end_tag=note_end_tag)
+        metabolite_dict[cobra_metabolite.id] = add_sbml_species(
+            sbml_model, cobra_metabolite, note_start_tag=note_start_tag,
+            note_end_tag=note_end_tag)
 
     for the_reaction in cobra_model.reactions:
-        #This is probably the culprit.  Including cobra.Reaction
-        #objects explicitly in cobra.Model will speed this up.
+        # This is probably the culprit.  Including cobra.Reaction
+        # objects explicitly in cobra.Model will speed this up.
         sbml_reaction = sbml_model.createReaction()
-        #Need to remove - for proper SBML.  Replace with __
-        the_reaction_id = 'R_' + the_reaction.id.replace('-','__' )
+        # Need to remove - for proper SBML.  Replace with __
+        the_reaction_id = 'R_' + the_reaction.id.replace('-', '__')
         sbml_reaction.setId(the_reaction_id)
         # The reason we are not using the Reaction.reversibility property
         # is because the SBML definition of reversibility does not quite
@@ -456,11 +503,12 @@ def get_libsbml_document(cobra_model,
             sbml_reaction.setName(the_reaction.name)
         else:
             sbml_reaction.setName(the_reaction.id)
-        #Add in the reactant/product references
-        for the_metabolite, the_coefficient in the_reaction._metabolites.items():
+        # Add in the reactant/product references
+        for the_metabolite, the_coefficient in \
+                iteritems(the_reaction._metabolites):
             sbml_stoichiometry = the_coefficient
             metabolite_id = str(metabolite_dict[the_metabolite.id])
-            #Each SpeciesReference must have a unique id
+            # Each SpeciesReference must have a unique id
             if sbml_stoichiometry < 0:
                 species_reference = sbml_reaction.createReactant()
             else:
@@ -468,15 +516,16 @@ def get_libsbml_document(cobra_model,
             species_reference.setId(metabolite_id + '_' + the_reaction_id)
             species_reference.setSpecies(metabolite_id)
             species_reference.setStoichiometry(abs(sbml_stoichiometry))
-        #Deal with the case where the reaction is a boundary reaction
+        # Deal with the case where the reaction is a boundary reaction
         if len(the_reaction._metabolites) == 1:
-            the_metabolite, the_coefficient = list(the_reaction._metabolites.items())[0]
+            the_metabolite, the_coefficient = list(
+                the_reaction._metabolites.items())[0]
             metabolite_id = add_sbml_species(sbml_model, the_metabolite,
                                              note_start_tag=note_start_tag,
                                              note_end_tag=note_end_tag,
                                              boundary_metabolite=True)
             sbml_stoichiometry = -the_coefficient
-            #Each SpeciesReference must have a unique id
+            # Each SpeciesReference must have a unique id
             if sbml_stoichiometry < 0:
                 species_reference = sbml_reaction.createReactant()
             else:
@@ -485,18 +534,19 @@ def get_libsbml_document(cobra_model,
             species_reference.setSpecies(metabolite_id)
             species_reference.setStoichiometry(abs(sbml_stoichiometry))
 
-        #Add in the kineticLaw
-        sbml_law = KineticLaw(sbml_level, sbml_version)
+        # Add in the kineticLaw
+        sbml_law = libsbml.KineticLaw(sbml_level, sbml_version)
         if hasattr(sbml_law, 'setId'):
             sbml_law.setId('FLUX_VALUE')
         sbml_law.setFormula('FLUX_VALUE')
-        reaction_parameter_dict = {'LOWER_BOUND': [the_reaction.lower_bound, reaction_units],
-                                   'UPPER_BOUND': [the_reaction.upper_bound, reaction_units],
-                                   'FLUX_VALUE': [0, reaction_units],
-                                   'OBJECTIVE_COEFFICIENT': [the_reaction.objective_coefficient,
-                                                             'dimensionless']}
+        reaction_parameter_dict = {
+            'LOWER_BOUND': [the_reaction.lower_bound, reaction_units],
+            'UPPER_BOUND': [the_reaction.upper_bound, reaction_units],
+            'FLUX_VALUE': [0, reaction_units],
+            'OBJECTIVE_COEFFICIENT': [the_reaction.objective_coefficient,
+                                      'dimensionless']}
         for k, v in reaction_parameter_dict.items():
-            sbml_parameter = Parameter(sbml_level, sbml_version)
+            sbml_parameter = libsbml.Parameter(sbml_level, sbml_version)
             sbml_parameter.setId(k)
             if hasattr(v, '__iter__'):
                 sbml_parameter.setValue(v[0])
@@ -506,81 +556,95 @@ def get_libsbml_document(cobra_model,
             sbml_law.addParameter(sbml_parameter)
         sbml_reaction.setKineticLaw(sbml_law)
 
-        #Checks if GPR and Subsystem annotations are present in the notes section and if they are the same as those in
-        #the reaction's gene_reaction_rule/ subsystem attribute
-        #If they are not identical, they are set to be identical
+        # Checks if GPR and Subsystem annotations are present in the notes
+        # section and if they are the same as those in the reaction's
+        # gene_reaction_rule/ subsystem attribute. If they are not identical,
+        # they are set to be identical
         note_dict = the_reaction.notes.copy()
         if the_reaction.gene_reaction_rule:
-            if 'GENE ASSOCIATION' in note_dict:
-                del note_dict['GENE ASSOCIATION']
-            note_dict['GENE_ASSOCIATION'] = [str(the_reaction.gene_reaction_rule)]
+            note_dict['GENE ASSOCIATION'] = [
+                str(the_reaction.gene_reaction_rule)]
         if the_reaction.subsystem:
             note_dict['SUBSYSTEM'] = [str(the_reaction.subsystem)]
 
-        #In a cobrapy model the notes section is stored as a dictionary. The following section turns the key-value-pairs
-        #of the dictionary into a string and replaces recurring symbols so that the string has the required syntax for
-        #an SBML doc.
+        # In a cobrapy model the notes section is stored as a dictionary. The
+        # following section turns the key-value-pairs of the dictionary into a
+        # string and replaces recurring symbols so that the string has the
+        # required syntax for an SBML doc.
         note_str = str(list(iteritems(note_dict)))
         note_start_tag, note_end_tag, note_delimiter = '<p>', '</p>', ':'
-        note_str = note_str.replace('(\'',note_start_tag)
-        note_str = note_str.replace('\']),',note_end_tag)
-        note_str = note_str.replace('\',',note_delimiter)
-        note_str = note_str.replace('\']','')
-        note_str = note_str.replace('[\'','')
-        note_str = note_str.replace('[','<html xmlns="http://www.w3.org/1999/xhtml">')
-        note_str = note_str.replace(')]',note_end_tag+'</html>')
+        note_str = note_str.replace('(\'', note_start_tag)
+        note_str = note_str.replace('\']),', note_end_tag)
+        note_str = note_str.replace('\',', note_delimiter)
+        note_str = note_str.replace('\']', '')
+        note_str = note_str.replace('[\'', '')
+        note_str = note_str.replace(
+            '[', '<html xmlns="http://www.w3.org/1999/xhtml">')
+        note_str = note_str.replace(')]', note_end_tag + '</html>')
         sbml_reaction.setNotes(note_str)
 
     if use_fbc_package:
         try:
             from libsbml import ConversionProperties, LIBSBML_OPERATION_SUCCESS
             conversion_properties = ConversionProperties()
-            conversion_properties.addOption("convert cobra", True, "Convert Cobra model")
+            conversion_properties.addOption(
+                "convert cobra", True, "Convert Cobra model")
             result = sbml_doc.convert(conversion_properties)
             if result != LIBSBML_OPERATION_SUCCESS:
                 raise Exception("Conversion of COBRA to SBML+fbc failed")
         except Exception as e:
             error_string = 'Error saving as SBML+fbc. %s'
             try:
-                #Check whether the FbcExtension is there
+                # Check whether the FbcExtension is there
                 from libsbml import FbcExtension
-                error_string = error_string%e
+                error_string = error_string % e
             except ImportError:
-                error_string = error_string%'FbcExtension not available in libsbml. ' +\
-                               'If use_fbc_package == True then libsbml must be compiled with ' +\
-                               'the fbc extension. '
+                error_string = error_string % (
+                    "FbcExtension not available in libsbml. If use_fbc_package"
+                    " == True then libsbml must be compiled with the fbc extension. ")
                 from libsbml import getLibSBMLDottedVersion
                 _sbml_version = getLibSBMLDottedVersion()
                 _major, _minor, _patch = map(int, _sbml_version.split('.'))
                 if _major < 5 or (_major == 5 and _minor < 8):
-                    error_string += "You've got libsbml %s installed.   You need 5.8.0 or later with the fbc package"
+                    error_string += ("You've got libsbml %s installed. "
+                                     "You need 5.8.0 or later with the fbc package")
 
-            raise(Exception(error_string))
+            raise Exception(error_string)
     return sbml_doc
 
+
 def add_sbml_species(sbml_model, cobra_metabolite, note_start_tag,
                      note_end_tag, boundary_metabolite=False):
     """A helper function for adding cobra metabolites to an sbml model.
 
+    Parameters
+    ----------
     sbml_model: sbml_model object
 
     cobra_metabolite: a cobra.Metabolite object
 
-    note_start_tag: the start tag for parsing cobra notes. this will eventually
-    be supplanted when COBRA is worked into sbml.
+    note_start_tag: string
+       the start tag for parsing cobra notes. this will eventually
+       be supplanted when COBRA is worked into sbml.
 
-    note_end_tag: the end tag for parsing cobra notes. this will eventually
-    be supplanted when COBRA is worked into sbml.
+    note_end_tag: string
+       the end tag for parsing cobra notes. this will eventually
+       be supplanted when COBRA is worked into sbml.
+    boundary_metabolite: bool
+       if metabolite boundary condition should be set or not
 
+    Returns
+    -------
+    string: the created metabolite identifier
     """
     sbml_species = sbml_model.createSpecies()
     the_id = 'M_' + cobra_metabolite.id.replace('-', '__')
-    #Deal with legacy naming issues
+    # Deal with legacy naming issues
     the_compartment = cobra_metabolite.compartment
-    if the_id.endswith('[%s]'%the_compartment):
-        the_id = the_id[:-len('[%s]'%the_compartment)]
-    elif not the_id.endswith('_%s'%the_compartment):
-        the_id += '_%s'%the_compartment
+    if the_id.endswith('[%s]' % the_compartment):
+        the_id = the_id[:-len('[%s]' % the_compartment)]
+    elif not the_id.endswith('_%s' % the_compartment):
+        the_id += '_%s' % the_compartment
     if boundary_metabolite:
         the_id += '_boundary'
     sbml_species.setId(the_id)
@@ -599,30 +663,33 @@ def add_sbml_species(sbml_model, cobra_metabolite, note_start_tag,
             return cobra_metabolite
     if cobra_metabolite.charge is not None:
         sbml_species.setCharge(cobra_metabolite.charge)
-    #Deal with cases where the formula in the model is not an object but as a string
+    # Deal with cases where the formula in the model is not an object but as a
+    # string
     if cobra_metabolite.formula or cobra_metabolite.notes:
-        tmp_note =  '<html xmlns="http://www.w3.org/1999/xhtml">'
+        tmp_note = '<html xmlns="http://www.w3.org/1999/xhtml">'
         if hasattr(cobra_metabolite.formula, 'id'):
-            tmp_note += '%sFORMULA: %s%s'%(note_start_tag,
-                                              cobra_metabolite.formula.id,
-                                              note_end_tag)
+            tmp_note += '%sFORMULA: %s%s' % (note_start_tag,
+                                             cobra_metabolite.formula.id,
+                                             note_end_tag)
         else:
-            tmp_note += '%sFORMULA: %s%s'%(note_start_tag,
-                                  cobra_metabolite.formula,
-                                  note_end_tag)
+            tmp_note += '%sFORMULA: %s%s' % (note_start_tag,
+                                             cobra_metabolite.formula,
+                                             note_end_tag)
         if hasattr(cobra_metabolite.notes, 'items'):
             for the_id_type, the_id in cobra_metabolite.notes.items():
                 if the_id_type.lower() == 'charge':
-                    continue #Use of notes['CHARGE'] has been deprecated in favor of metabolite.charge
+                    # Use of notes['CHARGE'] has been deprecated in favor of
+                    # metabolite.charge
+                    continue
                 if not isinstance(the_id_type, str):
                     the_id_type = str(the_id_type)
                 if hasattr(the_id, '__iter__') and len(the_id) == 1:
                     the_id = the_id[0]
                 if not isinstance(the_id, str):
                     the_id = str(the_id)
-                tmp_note += '%s%s: %s%s'%(note_start_tag,
-                                             the_id_type,
-                                             the_id, note_end_tag)
+                tmp_note += '%s%s: %s%s' % (note_start_tag,
+                                            the_id_type,
+                                            the_id, note_end_tag)
         sbml_species.setNotes(tmp_note + '</html>')
     return metabolite_id
 
@@ -649,7 +716,7 @@ def fix_legacy_id(id, use_hyphens=False, fix_compartments=False):
     if fix_compartments:
         if len(id) > 2:
             if (id[-3] == "(" and id[-1] == ")") or \
-               (id[-3] == "[" and id[-1] == "]"):
+                    (id[-3] == "[" and id[-1] == "]"):
                 id = id[:-3] + "_" + id[-2]
     return id
 
diff --git a/cobra/io/sbml3.py b/cobra/io/sbml3.py
index c08f433..d2c1c07 100644
--- a/cobra/io/sbml3.py
+++ b/cobra/io/sbml3.py
@@ -1,34 +1,41 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import re
+from ast import And, BoolOp, Name, Or
+from bz2 import BZ2File
 from collections import defaultdict
-from warnings import warn, catch_warnings, simplefilter
 from decimal import Decimal
-from ast import Name, Or, And, BoolOp
 from gzip import GzipFile
-from bz2 import BZ2File
 from tempfile import NamedTemporaryFile
-from sys import exc_info
-import re
+from warnings import catch_warnings, simplefilter, warn
 
 from six import iteritems, string_types
 
-from .. import Metabolite, Reaction, Gene, Model
-from ..core.Gene import parse_gpr
-from ..manipulation.modify import _renames
-from ..manipulation.validate import check_reaction_bounds, \
-    check_metabolite_compartment_formula
+from cobra.core import Gene, Metabolite, Model, Reaction
+from cobra.core.gene import parse_gpr
+from cobra.manipulation.modify import _renames
+from cobra.manipulation.validate import check_metabolite_compartment_formula
+from cobra.util.solver import set_objective
 
 try:
-    from lxml.etree import parse, Element, SubElement, \
-        ElementTree, register_namespace, ParseError, XPath
+    from lxml.etree import (
+        parse, Element, SubElement, ElementTree, register_namespace,
+        ParseError, XPath)
+
     _with_lxml = True
 except ImportError:
-    warn("Install lxml for faster SBML I/O")
     _with_lxml = False
     try:
-        from xml.etree.cElementTree import parse, Element, SubElement, \
-            ElementTree, register_namespace, ParseError
+        from xml.etree.cElementTree import (
+            parse, Element, SubElement, ElementTree, register_namespace,
+            ParseError)
     except ImportError:
-        from xml.etree.ElementTree import parse, Element, SubElement, \
-            ElementTree, register_namespace, ParseError
+        XPath = None
+        from xml.etree.ElementTree import (
+            parse, Element, SubElement, ElementTree, register_namespace,
+            ParseError)
 
 # use sbml level 2 from sbml.py (which uses libsbml). Eventually, it would
 # be nice to use the libSBML converters directly instead.
@@ -37,16 +44,15 @@ try:
 except ImportError:
     libsbml = None
 else:
-    from .sbml import create_cobra_model_from_sbml_file as read_sbml2
-    from .sbml import write_cobra_model_to_sbml_file as write_sbml2
+    from cobra.io.sbml import create_cobra_model_from_sbml_file as read_sbml2
+    from cobra.io.sbml import write_cobra_model_to_sbml_file as write_sbml2
 
 try:
     from sympy import Basic
-except:
+except ImportError:
     class Basic:
         pass
 
-
 # deal with namespaces
 namespaces = {"fbc": "http://www.sbml.org/sbml/level3/version1/fbc/version2",
               "sbml": "http://www.sbml.org/sbml/level3/version1/core",
@@ -62,6 +68,7 @@ def ns(query):
         query = query.replace(prefix + ":", "{" + uri + "}")
     return query
 
+
 # XPATH query wrappers
 fbc_prefix = "{" + namespaces["fbc"] + "}"
 sbml_prefix = "{" + namespaces["sbml"] + "}"
@@ -82,6 +89,9 @@ SPECIES_XPATH = ns("sbml:listOfSpecies/sbml:species[@boundaryCondition='%s']")
 OBJECTIVES_XPATH = ns("fbc:objective[@fbc:id='%s']/"
                       "fbc:listOfFluxObjectives/"
                       "fbc:fluxObjective")
+LONG_SHORT_DIRECTION = {'maximize': 'max', 'minimize': 'min'}
+SHORT_LONG_DIRECTION = {'min': 'minimize', 'max': 'maximize'}
+
 if _with_lxml:
     RDF_ANNOTATION_XPATH = ("sbml:annotation/rdf:RDF/"
                             "rdf:Description[@rdf:about=$metaid]/"
@@ -99,8 +109,8 @@ else:
         search_xpath = RDF_ANNOTATION_XPATH % metaid
         for i in sbml_element.iterfind(search_xpath):
             yield get_attrib(i, "rdf:resource")
-        for i in sbml_element.iterfind(search_xpath
-                                       .replace("isEncodedBy", "is")):
+        for i in sbml_element.iterfind(search_xpath.replace(
+                "isEncodedBy", "is")):
             yield get_attrib(i, "rdf:resource")
 
 
@@ -112,7 +122,7 @@ def get_attrib(tag, attribute, type=lambda x: x, require=False):
     value = tag.get(ns(attribute))
     if require and value is None:
         msg = "required attribute '%s' not found in tag '%s'" % \
-                             (attribute, tag.tag)
+              (attribute, tag.tag)
         if tag.get("id") is not None:
             msg += " with id '%s'" % tag.get("id")
         elif tag.get("name") is not None:
@@ -307,7 +317,7 @@ def parse_xml_into_model(xml, number=float):
             reaction.upper_bound = bounds[ub_id]
             reaction.lower_bound = bounds[lb_id]
         except KeyError as e:
-            raise CobraSBMLError("No constant bound with id '%s'" % e.message)
+            raise CobraSBMLError("No constant bound with id '%s'" % str(e))
         reactions.append(reaction)
 
         stoichiometry = defaultdict(lambda: 0)
@@ -340,7 +350,7 @@ def parse_xml_into_model(xml, number=float):
         # set gene reaction rule
         gpr_xml = sbml_reaction.find(GPR_TAG)
         if gpr_xml is not None and len(gpr_xml) != 1:
-            warn("ignoring invalid geneAssocation for " + repr(reaction))
+            warn("ignoring invalid geneAssociation for " + repr(reaction))
             gpr_xml = None
         gpr = process_gpr(gpr_xml[0]) if gpr_xml is not None else ''
         # remove outside parenthesis, if any
@@ -348,24 +358,37 @@ def parse_xml_into_model(xml, number=float):
             gpr = gpr[1:-1].strip()
         gpr = gpr.replace(SBML_DOT, ".")
         reaction.gene_reaction_rule = gpr
-    model.add_reactions(reactions)
+    try:
+        model.add_reactions(reactions)
+    except ValueError as e:
+        warn(str(e))
 
     # objective coefficients are handled after all reactions are added
     obj_list = xml_model.find(ns("fbc:listOfObjectives"))
     if obj_list is None:
         warn("listOfObjectives element not found")
         return model
-    target_objective = get_attrib(obj_list, "fbc:activeObjective")
-    obj_query = OBJECTIVES_XPATH % target_objective
+    target_objective_id = get_attrib(obj_list, "fbc:activeObjective")
+    target_objective = obj_list.find(
+        ns("fbc:objective[@fbc:id='{}']".format(target_objective_id)))
+    obj_direction_long = get_attrib(target_objective, "fbc:type")
+    obj_direction = LONG_SHORT_DIRECTION[obj_direction_long]
+
+    obj_query = OBJECTIVES_XPATH % target_objective_id
+    coefficients = {}
     for sbml_objective in obj_list.findall(obj_query):
         rxn_id = clip(get_attrib(sbml_objective, "fbc:reaction"), "R_")
         try:
             objective_reaction = model.reactions.get_by_id(rxn_id)
-        except KeyError as e:
+        except KeyError:
             raise CobraSBMLError("Objective reaction '%s' not found" % rxn_id)
-        objective_reaction.objective_coefficient = \
-            get_attrib(sbml_objective, "fbc:coefficient", type=number)
-
+        try:
+            coefficients[objective_reaction] = get_attrib(
+                sbml_objective, "fbc:coefficient", type=number)
+        except ValueError as e:
+            warn(str(e))
+    set_objective(model, coefficients)
+    model.solver.objective.direction = obj_direction
     return model
 
 
@@ -398,7 +421,8 @@ def model_to_xml(cobra_model, units=True):
     set_attrib(obj_list_tmp, "fbc:activeObjective", "obj")
     obj_list_tmp = SubElement(obj_list_tmp, ns("fbc:objective"))
     set_attrib(obj_list_tmp, "fbc:id", "obj")
-    set_attrib(obj_list_tmp, "fbc:type", "maximize")
+    set_attrib(obj_list_tmp, "fbc:type",
+               SHORT_LONG_DIRECTION[cobra_model.objective.direction])
     flux_objectives_list = SubElement(obj_list_tmp,
                                       ns("fbc:listOfFluxObjectives"))
 
@@ -407,7 +431,7 @@ def model_to_xml(cobra_model, units=True):
     param_attr = {"constant": "true"}
     if units:
         param_attr["units"] = "mmol_per_gDW_per_hr"
-    # the most common bounds are the minimum, maxmium, and 0
+    # the most common bounds are the minimum, maximum, and 0
     if len(cobra_model.reactions) > 0:
         min_value = min(cobra_model.reactions.list_attr("lower_bound"))
         max_value = max(cobra_model.reactions.list_attr("upper_bound"))
@@ -465,16 +489,16 @@ def model_to_xml(cobra_model, units=True):
     # add in genes
     if len(cobra_model.genes) > 0:
         genes_list = SubElement(xml_model, GENELIST_TAG)
-    for gene in cobra_model.genes:
-        gene_id = gene.id.replace(".", SBML_DOT)
-        sbml_gene = SubElement(genes_list, GENE_TAG)
-        set_attrib(sbml_gene, "fbc:id", "G_" + gene_id)
-        name = gene.name
-        if name is None or len(name) == 0:
-            name = gene.id
-        set_attrib(sbml_gene, "fbc:label", gene_id)
-        set_attrib(sbml_gene, "fbc:name", gene.name)
-        annotate_sbml_from_cobra(sbml_gene, gene)
+        for gene in cobra_model.genes:
+            gene_id = gene.id.replace(".", SBML_DOT)
+            sbml_gene = SubElement(genes_list, GENE_TAG)
+            set_attrib(sbml_gene, "fbc:id", "G_" + gene_id)
+            name = gene.name
+            if name is None or len(name) == 0:
+                name = gene.id
+            set_attrib(sbml_gene, "fbc:label", gene_id)
+            set_attrib(sbml_gene, "fbc:name", name)
+            annotate_sbml_from_cobra(sbml_gene, gene)
 
     # add in reactions
     reactions_list = SubElement(xml_model, "listOfReactions")
@@ -528,7 +552,7 @@ def model_to_xml(cobra_model, units=True):
             gpr = gpr.replace(".", SBML_DOT)
             gpr_xml = SubElement(sbml_reaction, GPR_TAG)
             try:
-                parsed = parse_gpr(gpr)[0]
+                parsed, _ = parse_gpr(gpr)
                 construct_gpr_xml(gpr_xml, parsed.body)
             except Exception as e:
                 print("failed on '%s' in %s" %
@@ -539,13 +563,16 @@ def model_to_xml(cobra_model, units=True):
 
 
 def read_sbml_model(filename, number=float, **kwargs):
+    if not _with_lxml:
+        warn("Install lxml for faster SBML I/O", ImportWarning)
     xmlfile = parse_stream(filename)
     xml = xmlfile.getroot()
     # use libsbml if not l3v1 with fbc v2
-    if xml.get("level") != "3" or xml.get("version") != "1" or \
-            get_attrib(xml, "fbc:required") is None:
+    use_libsbml = (xml.get("level") != "3" or xml.get("version") != "1" or
+                   get_attrib(xml, "fbc:required") is None)
+    if use_libsbml:
         if libsbml is None:
-            raise Exception("libSBML required for fbc < 2")
+            raise ImportError("libSBML required for fbc < 2")
         # libsbml needs a file string, so write to temp file if a file handle
         if hasattr(filename, "read"):
             with NamedTemporaryFile(suffix=".xml", delete=False) as outfile:
@@ -553,7 +580,7 @@ def read_sbml_model(filename, number=float, **kwargs):
             filename = outfile.name
         return read_sbml2(filename, **kwargs)
     try:
-        return parse_xml_into_model(xml, number=number, **kwargs)
+        return parse_xml_into_model(xml, number=number)
     except Exception:
         raise CobraSBMLError(
             "Something went wrong reading the model. You can get a detailed "
@@ -563,7 +590,7 @@ def read_sbml_model(filename, number=float, **kwargs):
 
 id_required = {ns(i) for i in ("sbml:model", "sbml:reaction:", "sbml:species",
                                "fbc:geneProduct", "sbml:compartment",
-                               "sbml:paramter", "sbml:UnitDefinition",
+                               "sbml:parameter", "sbml:UnitDefinition",
                                "fbc:objective")}
 invalid_id_detector = re.compile("|".join(re.escape(i[0]) for i in _renames))
 
@@ -595,8 +622,9 @@ def validate_sbml_model(filename, check_model=True):
     xmlfile = parse_stream(filename)
     xml = xmlfile.getroot()
     # use libsbml if not l3v1 with fbc v2
-    if xml.get("level") != "3" or xml.get("version") != "1" or \
-            get_attrib(xml, "fbc:required") is None:
+    use_libsbml = (xml.get("level") != "3" or xml.get("version") != "1" or
+                   get_attrib(xml, "fbc:required") is None)
+    if use_libsbml:
         raise CobraSBMLError("XML is not SBML level 3 v1 with fbc v2")
 
     errors = {k: [] for k in ("validator", "warnings", "SBML errors", "other")}
@@ -606,12 +634,11 @@ def validate_sbml_model(filename, check_model=True):
 
     # make sure there is exactly one model
     xml_models = xml.findall(ns("sbml:model"))
+    if len(xml_models) == 0:
+        raise CobraSBMLError("No SBML model detected in file")
     if len(xml_models) > 1:
         err("More than 1 SBML model detected in file")
-    elif len(xml_models) == 0:
-        err("No SBML model detected in file")
-    else:
-        xml_model = xml_models[0]
+    xml_model = xml_models[0]
 
     # make sure all sbml id's are valid
     all_ids = set()
@@ -658,10 +685,10 @@ def validate_sbml_model(filename, check_model=True):
             model = parse_xml_into_model(xml)
         except CobraSBMLError as e:
             err(str(e), "SBML errors")
-            return (None, errors)
+            return None, errors
         except Exception as e:
             err(str(e), "other")
-            return (None, errors)
+            return None, errors
     errors["warnings"].extend(str(i.message) for i in warning_list)
 
     # check genes
@@ -673,16 +700,17 @@ def validate_sbml_model(filename, check_model=True):
             err("No gene specfied with id 'G_%s'" % gene.id)
 
     if check_model:
-        errors["validator"].extend(check_reaction_bounds(model))
         errors["validator"].extend(check_metabolite_compartment_formula(model))
 
     return model, errors
 
 
 def write_sbml_model(cobra_model, filename, use_fbc_package=True, **kwargs):
+    if not _with_lxml:
+        warn("Install lxml for faster SBML I/O", ImportWarning)
     if not use_fbc_package:
         if libsbml is None:
-            raise Exception("libSBML required to write non-fbc models")
+            raise ImportError("libSBML required to write non-fbc models")
         write_sbml2(cobra_model, filename, use_fbc_package=False, **kwargs)
         return
     # create xml
diff --git a/cobra/io/yaml.py b/cobra/io/yaml.py
new file mode 100644
index 0000000..040d32e
--- /dev/null
+++ b/cobra/io/yaml.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import io
+
+from six import string_types
+from ruamel import yaml
+
+from cobra.io.dict import model_to_dict, model_from_dict
+
+YAML_SPEC = "1"
+
+
+def to_yaml(model, **kwargs):
+    """
+    Return the model as a YAML document.
+
+    ``kwargs`` are passed on to ``yaml.dump``.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to represent.
+
+    Returns
+    -------
+    str
+        String representation of the cobra model as a YAML document.
+
+    See Also
+    --------
+    save_yaml_model : Write directly to a file.
+    ruamel.yaml.dump : Base function.
+    """
+    obj = model_to_dict(model)
+    obj["version"] = YAML_SPEC
+    return yaml.dump(obj, Dumper=yaml.RoundTripDumper, **kwargs)
+
+
+def from_yaml(document):
+    """
+    Load a cobra model from a YAML document.
+
+    Parameters
+    ----------
+    document : str
+        The YAML document representation of a cobra model.
+
+    Returns
+    -------
+    cobra.Model
+        The cobra model as represented in the YAML document.
+
+    See Also
+    --------
+    load_yaml_model : Load directly from a file.
+    """
+    return model_from_dict(yaml.load(document, yaml.RoundTripLoader))
+
+
+def save_yaml_model(model, filename, **kwargs):
+    """
+    Write the cobra model to a file in YAML format.
+
+    ``kwargs`` are passed on to ``yaml.dump``.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to represent.
+    filename : str or file-like
+        File path or descriptor that the YAML representation should be
+        written to.
+
+    See Also
+    --------
+    to_yaml : Return a string representation.
+    ruamel.yaml.dump : Base function.
+    """
+    obj = model_to_dict(model)
+    obj["version"] = YAML_SPEC
+    if isinstance(filename, string_types):
+        with io.open(filename, "w") as file_handle:
+            yaml.dump(obj, file_handle, Dumper=yaml.RoundTripDumper, **kwargs)
+    else:
+        yaml.dump(obj, filename, Dumper=yaml.RoundTripDumper, **kwargs)
+
+
+def load_yaml_model(filename):
+    """
+    Load a cobra model from a file in YAML format.
+
+    Parameters
+    ----------
+    filename : str or file-like
+        File path or descriptor that contains the YAML document describing the
+        cobra model.
+
+    Returns
+    -------
+    cobra.Model
+        The cobra model as represented in the YAML document.
+
+    See Also
+    --------
+    from_yaml : Load from a string.
+    """
+    if isinstance(filename, string_types):
+        with io.open(filename, "r") as file_handle:
+            return model_from_dict(yaml.load(file_handle,
+                                             yaml.RoundTripLoader))
+    else:
+        return model_from_dict(yaml.load(filename, yaml.RoundTripLoader))
diff --git a/cobra/manipulation/__init__.py b/cobra/manipulation/__init__.py
index 6bf72f7..649153d 100644
--- a/cobra/manipulation/__init__.py
+++ b/cobra/manipulation/__init__.py
@@ -1,8 +1,14 @@
-from .delete import delete_model_genes, undelete_model_genes, remove_genes, \
-    find_gene_knockout_reactions
-from .modify import initialize_growth_medium, convert_to_irreversible, \
-    revert_to_reversible, escape_ID, canonical_form, \
-    get_compiled_gene_reaction_rules
-from .annotate import add_SBO
-from .validate import check_mass_balance, check_reaction_bounds, \
-    check_metabolite_compartment_formula
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from cobra.manipulation.annotate import add_SBO
+from cobra.manipulation.delete import (
+    delete_model_genes, find_gene_knockout_reactions, remove_genes,
+    undelete_model_genes)
+from cobra.manipulation.modify import (
+    canonical_form, convert_to_irreversible, escape_ID,
+    get_compiled_gene_reaction_rules, revert_to_reversible)
+from cobra.manipulation.validate import (
+    check_mass_balance, check_metabolite_compartment_formula,
+    check_reaction_bounds)
diff --git a/cobra/manipulation/annotate.py b/cobra/manipulation/annotate.py
index a6fd785..d1aabac 100644
--- a/cobra/manipulation/annotate.py
+++ b/cobra/manipulation/annotate.py
@@ -1,4 +1,6 @@
-from six import iteritems
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
 
 
 def add_SBO(model):
diff --git a/cobra/manipulation/delete.py b/cobra/manipulation/delete.py
index 93aa8e4..6a6b61c 100644
--- a/cobra/manipulation/delete.py
+++ b/cobra/manipulation/delete.py
@@ -1,8 +1,12 @@
-from ast import NodeTransformer, And
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from ast import And, NodeTransformer
 
 from six import iteritems, string_types
 
-from cobra.core.Gene import eval_gpr, parse_gpr, ast2str
+from cobra.core.gene import ast2str, eval_gpr, parse_gpr
 
 
 def prune_unused_metabolites(cobra_model):
@@ -10,7 +14,7 @@ def prune_unused_metabolites(cobra_model):
 
     Parameters
     ----------
-    cobra_model: cobra.core.Model
+    cobra_model: cobra.Model
         the model to remove unused metabolites from
 
     Returns
@@ -34,7 +38,7 @@ def prune_unused_reactions(cobra_model):
 
     Parameters
     ----------
-    cobra_model: cobra.core.Model
+    cobra_model: cobra.Model
         the model to remove unused reactions from
 
     Returns
diff --git a/cobra/manipulation/modify.py b/cobra/manipulation/modify.py
index 7866e4d..fcfe681 100644
--- a/cobra/manipulation/modify.py
+++ b/cobra/manipulation/modify.py
@@ -1,14 +1,17 @@
-from copy import deepcopy
-from warnings import warn
-from itertools import chain
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 from ast import NodeTransformer
+from itertools import chain
 
 from six import iteritems
+from warnings import warn
 
-from .. import Reaction, Metabolite, Gene
-from .delete import get_compiled_gene_reaction_rules
-from ..core.Gene import ast2str
-
+from cobra.core import Gene, Metabolite, Reaction
+from cobra.core.gene import ast2str
+from cobra.manipulation.delete import get_compiled_gene_reaction_rules
+from cobra.util.solver import set_objective
 
 _renames = (
     (".", "_DOT_"),
@@ -43,7 +46,6 @@ def _escape_str_id(id_str):
 
 
 class _GeneEscaper(NodeTransformer):
-
     def visit_Name(self, node):
         node.id = _escape_str_id(node.id)
         return node
@@ -98,6 +100,7 @@ def rename_genes(cobra_model, rename_dict):
         def visit_Name(self, node):
             node.id = rename_dict.get(node.id, node.id)
             return node
+
     gene_renamer = Renamer()
     for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):
         if rule is not None:
@@ -109,107 +112,6 @@ def rename_genes(cobra_model, rename_dict):
         cobra_model.genes.remove(i)
 
 
-def initialize_growth_medium(cobra_model, the_medium='MgM',
-                             external_boundary_compartment='e',
-                             external_boundary_reactions=None,
-                             reaction_lower_bound=0.,
-                             reaction_upper_bound=1000.,
-                             irreversible=False,
-                             reactions_to_disable=None):
-    """Sets all of the input fluxes to the model to zero and then will
-    initialize the input fluxes to the values specified in the_medium if
-    it is a dict or will see if the model has a composition dict and use
-    that to do the initialization.
-
-    cobra_model: A cobra.Model object.
-
-
-    the_medium: A string, or a dictionary.
-    If a string then the initialize_growth_medium function expects that
-    the_model has an attribute dictionary called media_compositions, which is a
-    dictionary of dictionaries for various medium compositions.  Where a medium
-    composition is a dictionary of external boundary reaction ids for the
-    medium components and the external boundary fluxes for each medium
-    component.
-
-
-    external_boundary_compartment: None or a string.
-    If not None then it specifies the compartment in which to disable all of
-    the external systems boundaries.
-
-    external_boundary_reactions: None or a list of external_boundaries that are
-    to have their bounds reset.  This acts in conjunction with
-    external_boundary_compartment.
-
-
-    reaction_lower_bound: Float.  The default value to use for the lower
-    bound for the boundary reactions.
-
-    reaction_upper_bound: Float.  The default value to use for the upper
-    bound for the boundary.
-
-    irreversible: Boolean.  If the model is irreversible then the medium
-    composition is taken as the upper bound
-
-    reactions_to_disable: List of reactions for which the upper and lower
-    bounds are disabled.  This is superceded by the contents of
-    media_composition
-
-    """
-    # Zero all of the inputs to the model
-    if hasattr(the_medium, 'keys'):
-        medium_composition = the_medium
-    else:
-        if hasattr(cobra_model, 'media_compositions'):
-            if the_medium in cobra_model.media_compositions:
-                medium_composition = cobra_model.media_compositions[the_medium]
-            else:
-                raise Exception("%s is not in the model's media list" %
-                                the_medium)
-        else:
-            raise Exception("the model doesn't have attribute "
-                            "media_compositions and the medium is not a dict")
-    if external_boundary_reactions is not None:
-        if isinstance(external_boundary_reactions[0], str):
-            external_boundary_reactions = map(cobra_model.reactions.get_by_id,
-                                              external_boundary_reactions)
-    elif external_boundary_compartment is None:
-            warn("We are initializing the medium without first adjusting all"
-                 "external boundary reactions")
-
-    # Select the system_boundary reactions to reset
-    if external_boundary_compartment is not None:
-        _system_boundaries = dict([(x, x.get_compartments())
-                                   for x in cobra_model.reactions
-                                   if x.boundary == 'system_boundary'])
-        [_system_boundaries.pop(k) for k, v in list(_system_boundaries.items())
-         if len(v) == 1 and external_boundary_compartment not in v]
-        if external_boundary_reactions is None:
-            external_boundary_reactions = _system_boundaries.keys()
-        else:
-            external_boundary_reactions += _system_boundaries.keys()
-
-    for the_reaction in external_boundary_reactions:
-        the_reaction.lower_bound = reaction_lower_bound
-        if the_reaction.upper_bound == 0:
-            the_reaction.upper_bound = reaction_upper_bound
-    # Disable specified reactions
-    if reactions_to_disable is not None:
-        if isinstance(reactions_to_disable[0], str):
-            reactions_to_disable = map(cobra_model.reactions.get_by_id,
-                                       reactions_to_disable)
-        for the_reaction in reactions_to_disable:
-            the_reaction.lower_bound = the_reaction.upper_bound = 0.
-
-    # Update the model inputs based on the_medium
-    for the_component in medium_composition.keys():
-        the_reaction = cobra_model.reactions.get_by_id(the_component)
-        if irreversible:
-            the_reaction.upper_bound = medium_composition[the_component]
-        else:
-            the_reaction.lower_bound = medium_composition[the_component]
-
-
 def convert_to_irreversible(cobra_model):
     """Split reversible reactions into two irreversible reactions
 
@@ -220,7 +122,9 @@ def convert_to_irreversible(cobra_model):
     cobra_model: A Model object which will be modified in place.
 
     """
+    warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
     reactions_to_add = []
+    coefficients = {}
     for reaction in cobra_model.reactions:
         # If a reaction is reverse only, the forward reaction (which
         # will be constrained to 0) will be left in the model.
@@ -228,8 +132,8 @@ def convert_to_irreversible(cobra_model):
             reverse_reaction = Reaction(reaction.id + "_reverse")
             reverse_reaction.lower_bound = max(0, -reaction.upper_bound)
             reverse_reaction.upper_bound = -reaction.lower_bound
-            reverse_reaction.objective_coefficient = \
-                reaction.objective_coefficient * -1
+            coefficients[
+                reverse_reaction] = reaction.objective_coefficient * -1
             reaction.lower_bound = max(0, reaction.lower_bound)
             reaction.upper_bound = max(0, reaction.upper_bound)
             # Make the directions aware of each other
@@ -246,15 +150,19 @@ def convert_to_irreversible(cobra_model):
             reverse_reaction._gene_reaction_rule = reaction._gene_reaction_rule
             reactions_to_add.append(reverse_reaction)
     cobra_model.add_reactions(reactions_to_add)
+    set_objective(cobra_model, coefficients, additive=True)
 
 
 def revert_to_reversible(cobra_model, update_solution=True):
-    """This function will convert a reversible model made by
+    """This function will convert an irreversible model made by
     convert_to_irreversible into a reversible model.
 
-    cobra_model: A cobra.Model which will be modified in place.
-
+    cobra_model : cobra.Model
+        A model which will be modified in place.
+    update_solution: bool
+        This option is ignored since `model.solution` was removed.
     """
+    warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
     reverse_reactions = [x for x in cobra_model.reactions
                          if "reflection" in x.notes and
                          x.id.endswith('_reverse')]
@@ -263,12 +171,6 @@ def revert_to_reversible(cobra_model, update_solution=True):
     if len(reverse_reactions) == 0:
         return
 
-    update_solution = update_solution and cobra_model.solution is not None \
-        and cobra_model.solution.status != "NA"
-
-    if update_solution:
-        x_dict = cobra_model.solution.x_dict
-
     for reverse in reverse_reactions:
         forward_id = reverse.notes.pop("reflection")
         forward = cobra_model.reactions.get_by_id(forward_id)
@@ -276,11 +178,6 @@ def revert_to_reversible(cobra_model, update_solution=True):
         if forward.upper_bound == 0:
             forward.upper_bound = -reverse.lower_bound
 
-        # update the solution dict
-        if update_solution:
-            if reverse.id in x_dict:
-                x_dict[forward_id] -= x_dict.pop(reverse.id)
-
         if "reflection" in forward.notes:
             forward.notes.pop("reflection")
 
@@ -289,11 +186,6 @@ def revert_to_reversible(cobra_model, update_solution=True):
     # probably speed things up here.
     cobra_model.remove_reactions(reverse_reactions)
 
-    # update the solution vector
-    if update_solution:
-        cobra_model.solution.x_dict = x_dict
-        cobra_model.solution.x = [x_dict[r.id] for r in cobra_model.reactions]
-
 
 def canonical_form(model, objective_sense='maximize',
                    already_irreversible=False, copy=True):
@@ -316,6 +208,7 @@ def canonical_form(model, objective_sense='maximize',
     copy: bool. Copy the model before making any modifications.
 
     """
+    warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
     if copy:
         model = model.copy()
 
diff --git a/cobra/manipulation/validate.py b/cobra/manipulation/validate.py
index 7d5aa72..cf39969 100644
--- a/cobra/manipulation/validate.py
+++ b/cobra/manipulation/validate.py
@@ -1,4 +1,9 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
 from math import isinf, isnan
+from warnings import warn
 
 NOT_MASS_BALANCED_TERMS = {"SBO:0000627",  # EXCHANGE
                            "SBO:0000628",  # DEMAND
@@ -9,7 +14,6 @@ NOT_MASS_BALANCED_TERMS = {"SBO:0000627",  # EXCHANGE
 
 
 def check_mass_balance(model):
-    warnings = []
     unbalanced = {}
     for reaction in model.reactions:
         if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS:
@@ -19,7 +23,10 @@ def check_mass_balance(model):
     return unbalanced
 
 
+# no longer strictly necessary, done by optlang solver interfaces
 def check_reaction_bounds(model):
+    warn("no longer necessary, done by optlang solver interfaces",
+         DeprecationWarning)
     errors = []
     for reaction in model.reactions:
         if reaction.lower_bound > reaction.upper_bound:
diff --git a/cobra/oven/README b/cobra/oven/README
deleted file mode 100644
index 2eebc5a..0000000
--- a/cobra/oven/README
+++ /dev/null
@@ -1,30 +0,0 @@
-This is the location for self-contained add-ons that are currently baking.  Please try to organize your buns into rational categories.  Occasionally, I will reorganize items in a logical fashion as the module set grows and my vision develops.
-
-If you want to contribute to the oven, please create a directory with your sourceforge username and put all files in it.
-
-
-For modifications to the pre-existing code, please continue to use the patch tracker or contact the sourceforge username associated with the files.
-
-Please follow the python style guide
- http://www.python.org/dev/peps/pep-0008/
-and document thoroughly.  Modules that deviate from these two commandments will not make it into the core and may result in revocation of svn privileges.
-
-
-Also, remember the following:
-1) Document thoroughly. The world is much bigger than you really seem to think.
-2) No camelCase or camelToes no matter who sports them.  The exception: classes must be defined with CamelToeCase.
-3) No non-standard lazy abbreviations kegg is acceptable mets, rxns, and
-coefs are not.
-4) phrasesmusthaveunderscores is bad phrases_must_have_underscores is
-good
-5) For default values in functions don't put spaces around the =. def
-my_cat(color = 'black') is bad. def my_cat(color='black') is good.
-6) Also, unless absolutely necessary you should import functions from modules and not use the whole path name.  This allows for faster transitions if we need to change an upstream module or want to maintain python and jython compatibility, or want to test some new package. 
-
-If a function something requires a very specific set of data files then it
-is best to make a module.
-
-
-Happy coding!
-
-Dr. S.
diff --git a/cobra/oven/WARNING b/cobra/oven/WARNING
deleted file mode 100644
index 82427cb..0000000
--- a/cobra/oven/WARNING
+++ /dev/null
@@ -1 +0,0 @@
-THIS DIRECTORY IS SUBJECT TO RANDOM MUTATIONS THAT ARE AESTHETICALLY APPEALING TO YOUR FRIENDLY FASCIST.
diff --git a/cobra/oven/__init__.py b/cobra/oven/__init__.py
deleted file mode 100644
index ec281e6..0000000
--- a/cobra/oven/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from warnings import warn
-warn("Functions in cobra.oven are still being baked thus are not officially supported and may not function")
diff --git a/cobra/oven/aliebrahim/__init__.py b/cobra/oven/aliebrahim/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/cobra/oven/aliebrahim/designAnalysis.py b/cobra/oven/aliebrahim/designAnalysis.py
deleted file mode 100644
index a0dc2fc..0000000
--- a/cobra/oven/aliebrahim/designAnalysis.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import numpy
-import pylab
-
-#from ... import solvers
-from cobra import solvers
-from itertools import combinations
-
-
-def plot_production_envelope(model, target_id, n_points=20, plot=True,
-        solver_name="glpk"):
-    """Plot the production envelope for the model given a target
-
-    Parameters
-    ----------
-    model : cobra model
-        The cobra model should already have the uptake rates se
-    target_id : str
-        The id of the exchange reaction for the target compound
-    n_points : int
-        The number of points to calculate for the production envolope
-    plot : bool, optional
-        Whether or not a plot should be made of the production envelope
-
-    Returns
-    -------
-    growth_rates : :class:`numpy.ndarray`
-        An array of growth rates
-    production_rates : :class:`numpy.ndarray`
-        An array of the corresponding maximum production rate at the
-        given growth rate.
-
-    """
-    solver = solvers.solver_dict[solver_name]
-    target_id = str(target_id)
-    target_reaction = model.reactions.get_by_id(target_id)
-    original_target_bounds = (target_reaction.lower_bound,
-                              target_reaction.upper_bound)
-    lp = solver.create_problem(model)
-    if solver.solve_problem(lp) != "optimal":
-        return ([0], [0])
-    solution = solver.format_solution(lp, model)
-    max_growth_rate = solution.f
-    max_growth_production = solution.x_dict[target_reaction.id]
-    #growth_coupled = max_growth_production > 0
-    # extract the current objective so it can be changed
-    original_objectives = {}
-    for reaction in model.reactions:
-        if reaction.objective_coefficient != 0:
-            original_objectives[reaction] = reaction.objective_coefficient
-            reaction.objective_coefficient = 0
-    # calculate the maximum possible production rate
-    target_reaction.objective_coefficient = 1
-    model.optimize(objective_sense="minimize")
-    min_production_rate = model.solution.f
-    model.optimize(objective_sense="maximize")
-    max_production_rate = model.solution.f
-    production_rates = numpy.linspace(min_production_rate,
-        max_production_rate, n_points)
-    # ensure the point of production at maximum growth is included
-    production_rates[
-        numpy.abs(production_rates - max_growth_production).argmin()] = \
-        max_growth_production
-    # if the 0 point was overwritten in the last operation
-    if production_rates[0] != 0:
-        production_rates[1] = production_rates[0]
-        production_rates[0] = 0
-    growth_rates = production_rates * 0
-    # make the objective coefficient what it was before
-    target_reaction.objective_coefficient = 0
-    for reaction, coefficient in original_objectives.iteritems():
-        reaction.objective_coefficient = coefficient
-    # calculate the maximum growth rate at each production rate
-    for i in range(n_points):
-        target_reaction.lower_bound = production_rates[i]
-        target_reaction.upper_bound = production_rates[i]
-        solver.update_problem(lp, model)
-        if solver.solve_problem(lp) == "optimal":
-            growth_rates[i] = solver.get_objective_value(lp)
-        else:
-            growth_rates[i] = 0
-    # reset the bounds on the target reaction
-    target_reaction.lower_bound = original_target_bounds[0]
-    target_reaction.upper_bound = original_target_bounds[1]
-    if plot:
-        pylab.plot(growth_rates, production_rates)
-        pylab.title("Production envelope for %s" % (target_id))
-        pylab.xlabel("Growth rate")
-        pylab.ylabel("Production rate")
-        pylab.xlim(xmin=0)
-        pylab.ylim(ymin=0)
-    return (growth_rates, production_rates)
-
-
-def analyze_growth_coupled_num_knockouts(model, knockout_reaction, target_name="EX_etoh_e"):
-    None
-    
-
-def analyze_growth_coupled_design_subset(model, knockout_reactions, knockout_count, target_name="EX_etoh_e"):
-    lp = model.optimize()
-    best_score = 0
-    best = []
-    lb = [None] * k  # store lower bounds when reactions are knocked out
-    ub = [None] * k  # store upper bounds when reactions are knocked out
-    for subset in combinations(knockout_reactions, knockout_reactions):
-        # knockout reactions
-        for i, reaction_name in enumerate(subset):
-            reaction = model.reactions.get_by_id(str(reaction_name))
-            (lb[i], ub[i]) = (reaction.lower_bound, reaction.upper_bound)
-            (reaction.lower_bound, reaction.upper_bound) = (0.0, 0.0)
-        model.optimize()
-        production = model.solution.x_dict[target_name]
-        # identical performance
-        if abs(production - best_score) < 0.001:
-            best.append(subset)
-        # better performance
-        elif production > best_score:
-            best_score = model.solution.x_dict[target_name]
-            best = [subset]
-        print model.solution.f, model.solution.x_dict[target_name]
-        # reset reactions
-        for i, reaction_name in enumerate(subset):
-            (reaction.lower_bound, reaction.upper_bound) = (lb[i], ub[i])
-    return best_score, best
-
-if __name__ == "__main__":
-    from cobra.test import ecoli_pickle, create_test_model
-    from time import time
-
-    model = create_test_model(ecoli_pickle)
-    #from IPython import embed; embed()
-    model.reactions.get_by_id("EX_o2_e").lower_bound = 0
-    #analyze_strain_design(model, ["ABTA", "ACALD", "ACKr", "ATPS4rpp", "F6PA",
-    #          "GLUDy", "LDH_D", "MGSA", "PFL", "TPI"])
-
-    for i in ["ABTA", "ACALD", "ACKr", "ATPS4rpp", "F6PA",
-              "GLUDy", "LDH_D", "MGSA", "PFL", "TPI"]:
-        model.reactions.get_by_id(i).lower_bound = 0
-        model.reactions.get_by_id(i).upper_bound = 0
-    start = time()
-    plot_production_envelope(model, "EX_etoh_e", solver_name="glpk", n_points=40, plot=True)
-    print "ran in %.2f seconds" % (time() - start)
-    pylab.show()
-    # calculates in approx 1 seconds on 3.4 GHz i7
diff --git a/cobra/oven/aliebrahim/gapAnalysis_MILP_figure.svg b/cobra/oven/aliebrahim/gapAnalysis_MILP_figure.svg
deleted file mode 100644
index 026799a..0000000
--- a/cobra/oven/aliebrahim/gapAnalysis_MILP_figure.svg
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Generator: Adobe Illustrator 15.0.2, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
-<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
-	 width="792px" height="612px" viewBox="0 0 792 612" enable-background="new 0 0 792 612" xml:space="preserve">
-<rect x="8.5" y="116.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="450" height="300"/>
-<rect x="8.5" y="116.5" fill="#B9E5FB" stroke="#000000" stroke-miterlimit="10" width="350" height="200"/>
-<rect x="8.5" y="116.5" fill="#6DCFF6" stroke="#000000" stroke-miterlimit="10" width="250" height="150"/>
-<text transform="matrix(1 0 0 1 96.4087 191.5)" font-family="'MyriadPro-Regular'" font-size="12">Original Model</text>
-<text transform="matrix(1 0 0 1 281.0386 286.4717)" font-family="'MyriadPro-Regular'" font-size="12">SUX Model</text>
-<rect x="258.5" y="316.5" fill="#C7C8CA" stroke="#000000" stroke-miterlimit="10" width="100" height="100"/>
-<rect x="358.5" y="316.5" fill="#C7C8CA" stroke="#000000" stroke-miterlimit="10" width="100" height="100"/>
-<text transform="matrix(1 0 0 1 306.502 366.5)" font-family="'TimesNewRomanPSMT'" font-size="12">I</text>
-<text transform="matrix(1 0 0 1 401.5039 366.5)" font-family="'TimesNewRomanPSMT'" font-size="12">-vI</text>
-<rect x="486.5" y="61.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="450"/>
-<rect x="486.5" y="61.5" fill="#B9E5FB" stroke="#000000" stroke-miterlimit="10" width="20" height="350"/>
-<rect x="486.5" y="61.5" fill="#6DCFF6" stroke="#000000" stroke-miterlimit="10" width="20" height="250"/>
-<rect x="536.5" y="116.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="300"/>
-<rect x="536.5" y="116.5" fill="#B9E5FB" stroke="#000000" stroke-miterlimit="10" width="20" height="200"/>
-<rect x="536.5" y="116.5" fill="#6DCFF6" stroke="#000000" stroke-miterlimit="10" width="20" height="150"/>
-<text transform="matrix(1 0 0 1 464.917 266.5)" font-family="'TimesNewRomanPSMT'" font-size="24">X</text>
-<text transform="matrix(1 0 0 1 513.1074 266.5)" font-family="'TimesNewRomanPSMT'" font-size="24">=</text>
-<g>
-	<g>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="8" y1="99.5" x2="257" y2="99.5"/>
-		<g>
-			<rect x="8" y="96" width="1" height="7"/>
-		</g>
-		<g>
-			<rect x="257" y="96" width="1" height="7"/>
-		</g>
-	</g>
-</g>
-<text transform="matrix(1 0 0 1 88.1284 99.5)" font-family="'MyriadPro-Regular'" font-size="12">Original Reactions</text>
-<g>
-	<g>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="258" y1="99.5" x2="358" y2="99.5"/>
-		<g>
-			<rect x="258" y="96" width="1" height="7"/>
-		</g>
-		<g>
-			<rect x="357" y="96" width="1" height="7"/>
-		</g>
-	</g>
-</g>
-<text transform="matrix(1 0 0 1 268.4287 99.5)" font-family="'MyriadPro-Regular'" font-size="12">Added Reactions</text>
-<g>
-	<g>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="358" y1="99.5" x2="458" y2="99.5"/>
-		<g>
-			<rect x="358" y="96" width="1" height="7"/>
-		</g>
-		<g>
-			<rect x="457" y="96" width="1" height="7"/>
-		</g>
-	</g>
-</g>
-<text transform="matrix(1 0 0 1 384.1543 99.5)" font-family="'MyriadPro-Regular'" font-size="12">Dummies</text>
-<rect x="8" y="317" fill="none" width="250" height="100"/>
-<text transform="matrix(1 0 0 1 130 367)" font-family="'TimesNewRomanPSMT'" font-size="12">0</text>
-<rect x="358" y="117" fill="none" width="100" height="200"/>
-<text transform="matrix(1 0 0 1 405 217)" font-family="'TimesNewRomanPSMT'" font-size="12">0</text>
-<rect x="8.5" y="416.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="450" height="15"/>
-<rect x="32.5" y="116.5" opacity="0.25" fill="#E49E86" stroke="#000000" stroke-miterlimit="10" width="15" height="150"/>
-<g>
-	<g>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="32.5" y1="267" x2="32.5" y2="268"/>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="2,4" x1="32.5" y1="272" x2="32.5" y2="429"/>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="32.5" y1="431" x2="32.5" y2="432"/>
-	</g>
-</g>
-<g>
-	<g>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="47.5" y1="267" x2="47.5" y2="268"/>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="2,4" x1="47.5" y1="272" x2="47.5" y2="429"/>
-		<line fill="none" stroke="#000000" stroke-miterlimit="10" x1="47.5" y1="431" x2="47.5" y2="432"/>
-	</g>
-</g>
-<text transform="matrix(0 -1 1 0 43.9521 232.8433)" font-family="'MyriadPro-Regular'" font-size="12">Objective function</text>
-<text transform="matrix(1 0 0 1 16.1074 427.7207)" font-family="'TimesNewRomanPSMT'" font-size="12">0</text>
-<text transform="matrix(1 0 0 1 228.3989 427.4844)" font-family="'TimesNewRomanPSMT'" font-size="12">0</text>
-<text transform="matrix(1 0 0 1 37 427.7207)" font-family="'TimesNewRomanPSMT'" font-size="12">1</text>
-<rect x="536.5" y="416.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="20"/>
-<text transform="matrix(1 0 0 1 481.1963 55.9136)" font-family="'MyriadPro-Regular'" font-size="12">Fluxes</text>
-<text transform="matrix(1 0 0 1 522.917 61.5)"><tspan x="0" y="0" font-family="'MyriadPro-Regular'" font-size="12">Metabolite</tspan><tspan x="-0.834" y="14.4" font-family="'MyriadPro-Regular'" font-size="12">production</tspan></text>
-<text transform="matrix(0 -1 1 0 499 490.8418)"><tspan x="0" y="0" font-family="'SymbolMT'" font-size="12">∈</tspan><tspan x="8.555" y="0" font-family="'TimesNewRomanPSMT'" font-size="12">{0,1}</tspan></text>
-<text transform="matrix(1 0 0 1 541.4492 426.5)" font-family="'TimesNewRomanPSMT'" font-size="12">>t</text>
-<text transform="matrix(1 0 0 1 540.1162 364.333)" font-family="'TimesNewRomanPSMT'" font-size="12"><0</text>
-<text transform="matrix(1 0 0 1 538.9492 186.5)" font-family="'TimesNewRomanPSMT'" font-size="12">=0</text>
-<text transform="matrix(1 0 0 1 540.1162 292.5)" font-family="'TimesNewRomanPSMT'" font-size="12">=0</text>
-<text transform="matrix(1 0 0 1 572.9141 274)" font-family="'TimesNewRomanPSMT'" font-size="48">:</text>
-<rect x="598.5" y="61.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="450"/>
-<rect x="598.5" y="61.5" fill="#B9E5FB" stroke="#000000" stroke-miterlimit="10" width="20" height="350"/>
-<rect x="598.5" y="61.5" fill="#6DCFF6" stroke="#000000" stroke-miterlimit="10" width="20" height="250"/>
-<text transform="matrix(1 0 0 1 593.1963 55.9136)" font-family="'MyriadPro-Regular'" font-size="12">Fluxes</text>
-<text transform="matrix(0 -1 1 0 611 490.8418)"><tspan x="0" y="0" font-family="'SymbolMT'" font-size="12">∈</tspan><tspan x="8.555" y="0" font-family="'TimesNewRomanPSMT'" font-size="12">{0,1}</tspan></text>
-<text transform="matrix(1 0 0 1 628.0469 268.5)" font-family="'TimesNewRomanPSMT'" font-size="24">•</text>
-<rect x="644.5" y="61.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="450"/>
-<rect x="644.5" y="411.5" fill="none" stroke="#000000" stroke-miterlimit="10" width="20" height="100"/>
-<text transform="matrix(1 0 0 1 651.5 266.5)" font-family="'TimesNewRomanPSMT'" font-size="12">0</text>
-<text transform="matrix(0 -1 1 0 658.2852 490.4404)" font-family="'MyriadPro-Regular'" font-size="12">Penalties</text>
-<text transform="matrix(1 0 0 1 673.1367 267)" font-family="'MyriadPro-Regular'" font-size="18">is minimized</text>
-</svg>
diff --git a/cobra/oven/aliebrahim/keggIO.py b/cobra/oven/aliebrahim/keggIO.py
deleted file mode 100644
index 1607d9c..0000000
--- a/cobra/oven/aliebrahim/keggIO.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import csv
-import re
-import copy
-from os.path import join, abspath, split
-
-import cobra
-
-# the default file locations
-kegg_directory = join(split(abspath(__file__))[0], "kegg_files")
-keggdictpath_default = join(kegg_directory, "kegg_dict.csv")
-reactionlst_default = join(kegg_directory, "reaction.lst")
-blacklistpath_default = join(kegg_directory, "kegg_blacklist.csv")
-
-
-def _intify(string):
-    """returns integer representation of the str
-    If str is a single letter, it will return 1"""
-    if string.isdigit():
-        return int(string)
-    # if the expression contains n, the default value is 2
-    n = 2
-    if string == "2n":
-        return 2 * n
-    try:
-        return eval(string)
-    except:
-        raise ValueError(string)
-
-
-def _parse_split_array(str_array):
-    """takes in an array of strings, each of which is either
-    - a compound OR
-    - a number followed by a compound
-    returns [array_of_metabolites, corresponding_coefficient]"""
-    metabolites = []
-    coefficients = []
-    for string in str_array:
-        string = string.strip()
-        if string[0].isupper():  # starts with an uppercase letter
-            # there is no number associated, so it should be 1
-            metabolites.append(string)
-            coefficients.append(1)
-        else:
-            the_coefficient, the_metabolite = string.split()
-            metabolites.append(the_metabolite)
-            coefficients.append(_intify(the_coefficient))
-    return [metabolites, coefficients]
-
-
-def import_kegg_reactions(compartment="c", reactionlstpath=None,
-                        keggdictpath=None, blacklistpath=None):
-    """reads in kegg reactions from the three given files
-    compartment: the compartment to which each reaction will be added
-
-    If no file is specified for any of these, a default file will be used:
-    reactionlstpath: path to path of kegg reactions
-    the format should be
-    reactionid: Met1 + 2 Met2 <=> Met3 + 2 Met4
-
-    keggdictpath: The path to a csv file translating between kegg and cobra
-    metabolite ID's, where the first column contains the kegg ID, and the
-    second contains cobra id
-
-    blacklistpath: path to a file listing the blacklisted reactions, with
-    one per line
-
-    returns: cobra model with all of the included reactions
-    """
-
-    if reactionlstpath is None:
-        reactionlstpath = reactionlst_default
-    if keggdictpath is None:
-        keggdictpath = keggdictpath_default
-    if blacklistpath is None:
-        blacklistpath = blacklistpath_default
-
-    # read in kegg dictionary to translate between kegg and cobra id's
-    keggdictfile = open(keggdictpath, "r")
-    keggdictcsv = csv.reader(keggdictfile)
-    keggdict = {}
-    for line in keggdictcsv:
-        keggdict[line[1]] = line[0]
-    keggdictfile.close()
-    # read in the kegg blacklist
-    keggblacklistfile = open(blacklistpath, "r")
-    keggblacklistcsv = csv.reader(keggblacklistfile)
-    keggblacklist = []
-    for line in keggblacklistcsv:
-        keggblacklist.append(line[0])
-    keggblacklistfile.close()
-
-    # parse the file of kegg reactions
-    keggfile = open(reactionlstpath, "r")
-    # regular expressions to split strings
-    colon_sep = re.compile(":").split
-    arrow_sep = re.compile("<=>").split
-    plus_sep = re.compile(" \+ ").split
-    keggreactions = []
-    cobra_reactions = []
-    used_metabolites = {}
-    for line in keggfile:
-        [id, reactionstr] = colon_sep(line, maxsplit=1)
-        # remove whitespace
-        id = id.strip()
-        # if the id is in the blacklist, no need to proceed
-        if id in keggblacklist:
-            continue
-        # split into reactants and products
-        reactants_str, products_str = arrow_sep(reactionstr, maxsplit=1)
-        # break up reactant and product strings into arrays of
-        # metabolites and coefficients
-        reactant_metabolites, reactant_coefficients = \
-            _parse_split_array(plus_sep(reactants_str))
-        product_metabolites, product_coefficients = \
-            _parse_split_array(plus_sep(products_str))
-        # reactant coefficients all need to be multiplied by -1
-        for i, coeff in enumerate(reactant_coefficients):
-            reactant_coefficients[i] = coeff * -1
-        # make one array for all compoenents
-        kegg_metabolites = reactant_metabolites
-        coefficients = reactant_coefficients
-        kegg_metabolites.extend(product_metabolites)
-        coefficients.extend(product_coefficients)
-        # translate the metabolites from kegg to cobra
-        metabolites = []
-        try:
-            for the_kegg_metabolite in kegg_metabolites:
-                metabolites.append(keggdict[the_kegg_metabolite])
-        # if one of the metabolites is not found, skip to the next line
-        except KeyError:
-            continue
-
-        # make a Kegg reaction
-        reaction = cobra.Reaction(id)
-        metabolite_dict = {}  # dict of {metabolite : coefficient}
-        for i, the_metabolite in enumerate(metabolites):
-            metabolite_id = the_metabolite + "_" + compartment
-            # if the metabolite already exists
-            if metabolite_id in used_metabolites:
-                used_metabolites[metabolite_id] = coefficients[i]
-            else:
-                # use a new metabolite
-                new_metabolite = cobra.Metabolite(metabolite_id)
-                used_metabolites[metabolite_id] = new_metabolite
-                metabolite_dict[cobra.Metabolite(metabolite_id)] = \
-                    coefficients[i]
-        reaction.add_metabolites(metabolite_dict)
-        reaction.notes["temporary_gapfilling_type"] = "Universal"
-        # because the model will be converted to irreversible
-        reaction.lower_bound = -1 * reaction.upper_bound
-        cobra_reactions.append(reaction)
-    keggfile.close()
-    # add all of the reactions to a cobra model
-    Universal = cobra.Model("Kegg_Universal_Reactions")
-    Universal.add_reactions(cobra_reactions)
-    return Universal
-if __name__ == "__main__":
-    from time import time
-    start_time = time()
-    test_import = import_kegg_reactions()
-    duration = time() - start_time
-    print "imported %d reactions in %.2f sec" % \
-        (len(test_import.reactions), duration)
diff --git a/cobra/oven/aliebrahim/simphenyIO.py b/cobra/oven/aliebrahim/simphenyIO.py
deleted file mode 100644
index 2759be7..0000000
--- a/cobra/oven/aliebrahim/simphenyIO.py
+++ /dev/null
@@ -1,184 +0,0 @@
-from os.path import isfile
-import csv
-import re
-from warnings import warn
-
-import cobra
-
-
-def export_flux_distribution(model, filepath):
-    """Export flux distribution to import into Simpheny.
-
-    Parameters
-    ----------
-    model : cobra.Model
-    filepath: str
-
-    """
-    from simphenyMapping import mapping
-    outfile = open(filepath, "w")
-    outcsv = csv.writer(outfile, delimiter="\t", lineterminator="\n")
-    outcsv.writerow(["Reaction Number", "Flux Value",
-                     "Lower Bound", "Upper Bound"])
-    for reaction_name, reaction_flux in model.solution.x_dict.iteritems():
-        reaction = model.reactions.get_by_id(reaction_name)
-        try:
-            outcsv.writerow([mapping[reaction_name], reaction_flux,
-                reaction.lower_bound, reaction.upper_bound])
-        except KeyError, e:
-            print "Simpheny id number not found for", e
-    outfile.close()
-
-
-def _header_count(filename):
-    """count the number of header lines in a file
-    The header is defined as over when a line is found which begins
-    with a number"""
-    file = open(filename, "r")
-    for i, line in enumerate(file):
-        if line[0].isdigit():
-            file.close()
-            return i
-    file.close()
-    return False
-
-
-def _open_and_skip_header(filename):
-    """returns a csv file with the header skipped"""
-    count = _header_count(filename)
-    if not count:
-        raise (IOError, "%s corrupted" % filename)
-    file = open(filename, "r")
-    for i in range(count):
-        file.readline()
-    return csv.reader(file, delimiter="\t")
-
-
-def _find_metabolites_by_base(base, metabolites):
-    """search for all metabolites in the list which match the base name.
-    For example, "h2o" will identify both "h2o(c)" and "h2o(e)" """
-    search = re.compile(base + "\([a-z]\)")
-    found = []
-    for the_metabolite in metabolites:
-        if search.match(the_metabolite.id) is not None:
-            found.append(the_metabolite)
-    return found
-
-
-def read_simpheny(baseName, min_lower_bound=-1000, max_upper_bound=1000,
-        maximize_info=True):
-    r"""Imports files exported from a SimPheny simulation as a cobra model.
-
-    .. warning:: Use with caution. This is a legacy import function, and
-        errors have been observed in the converted gene-reaction rules.
-
-    Parameters
-    ----------
-    baseName : str
-        The filepath to the exported SimPheny files without any of the
-        extensions. On Windows, it helps if baseName is a raw string
-        (i.e. r"Path\\to\\files")
-    min_lower_bound, max_upper_bound : float or int, optional
-        The bounds on the lower and upper bounds of fluxes in model.
-    maximize_info : bool, optional
-        An optional boolean keyword argument. If True, then an attempt
-        will be made to parse the gpr and metabolite info files, and the
-        function will take a little bit longer.
-
-    Returns
-    -------
-    model : cobra.Model
-        the imported simpheny model
-
-    """
-
-    # check to make sure the files can be read
-    if not(isfile(baseName + ".met")
-        and isfile(baseName + ".rxn")
-        and isfile(baseName + ".sto")):
-        # try again with modifying the baseName
-        baseName = baseName.encode("string-escape")
-        if not(isfile(baseName + ".met")
-            and isfile(baseName + ".rxn")
-            and isfile(baseName + ".sto")):
-            raise (IOError, "Input file(s) not found")
-    model = cobra.Model("SimPheny import from " + baseName)
-    # read in metabolite file
-    metfile = _open_and_skip_header(baseName + ".met")
-    metabolites = []
-    for line in metfile:
-        if len(line) == 0:
-            break
-        metabolite = cobra.Metabolite(id=line[1], name=line[2],
-            compartment=line[3])
-        if maximize_info:
-            compartment_search = re.findall("\([a-z]\)$", metabolite.id)
-            if compartment_search != []:
-                metabolite.compartment = compartment_search[0][1]
-                model.compartments[metabolite.compartment] = line[3]
-        metabolites.append(metabolite)
-    model.add_metabolites(metabolites)
-    # scalefunc will limit the maximum and minumum fluxes
-    scalefunc = lambda x: max(min(max_upper_bound, x), min_lower_bound)
-    # read in reaction file
-    reaction_file = _open_and_skip_header(baseName + ".rxn")
-    reactions = []
-    for line in reaction_file:
-        if len(line) == 0:
-            break
-        the_reaction = cobra.Reaction()
-        the_reaction.id = line[1]
-        the_reaction.name = line[2]
-        if line[3].lower() == "reversible":
-            the_reaction.reversibility = 1
-        elif line[3].lower() == "irreversible":
-            the_reaction.reversibility = 0
-        the_reaction.lower_bound = scalefunc(float(line[4]))
-        the_reaction.upper_bound = scalefunc(float(line[5]))
-        the_reaction.objective_coefficient = float(line[6])
-        reactions.append(the_reaction)
-    model.add_reactions(reactions)
-    # read in S matrix
-    Sfile = _open_and_skip_header(baseName + ".sto")
-    S = []
-    for i, line in enumerate(Sfile):
-        if len(line) == 0:
-            break
-        the_metabolite = metabolites[i]
-        for j, ns in enumerate(line):
-            n = float(ns)
-            if n != 0:
-                model.reactions[j].add_metabolites({the_metabolite: n})
-    # attempt to read in more data
-    infofilepath = baseName + "_cmpd.txt"
-    if maximize_info and isfile(infofilepath):
-        infofile = open(infofilepath, "r")
-        infofile.readline()  # skip the header
-        infocsv = csv.reader(infofile)
-        for row in infocsv:
-            found = _find_metabolites_by_base(row[0], model.metabolites)
-            for found_metabolite in found:
-                found_metabolite.formula = row[2]
-                found_metabolite.parse_composition()
-                found_metabolite.charge = row[4]
-                found_metabolite.notes = {}
-                found_metabolite.notes["KEGG_id"] = row[8]
-                found_metabolite.notes["CAS"] = row[5]
-                found_metaboltie.notes["review status"] = row[3]
-        infofile.close()
-    gpr_filepath = baseName + "_gpr.txt"
-    if maximize_info and isfile(gpr_filepath):
-        warn("SimPheny export files may have errors in the gpr.")
-        # Using this may be risky
-        gpr_file = open(gpr_filepath, "r")
-        gpr_file.readline()  # skip the header
-        gpr_csv = csv.reader(gpr_file)
-        for row in gpr_csv:
-            the_reaction = model.reactions[model.reactions.index(row[0])]
-            the_reaction.gene_reaction_rule = row[5]
-            the_reaction.parse_gene_association()
-        gpr_file.close()
-    # model.update()
-    return model
-
-
diff --git a/cobra/oven/danielhyduke/__init__.py b/cobra/oven/danielhyduke/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/cobra/oven/danielhyduke/construction/balance.py b/cobra/oven/danielhyduke/construction/balance.py
deleted file mode 100644
index 7148187..0000000
--- a/cobra/oven/danielhyduke/construction/balance.py
+++ /dev/null
@@ -1,329 +0,0 @@
-from time import time
-from numpy import zeros, matrix, array
-from scipy.sparse import lil_matrix, dok_matrix
-from cPickle import load, dump
-from collections import defaultdict
-from warnings import warn
-def create_element_matrix(cobra_model, db_cursor=None, me_matrix=False,
-                          cas='sympy'):
-    """Constructs a matrix of elements x metabolites for the metabolites
-    in cobra_model.  If elemental compositions are not available for all
-    metabolites then a symbolic matrix is returned with symbols representing
-    the unknown compositions.
-
-    cobra_model:  A cobra.Model object.
-
-    db_cursor:  Internal use only.
-
-    me_matrix: False. Internal use only.
-
-    cas: 'sympy' or 'ginac'.  Specifies the computer algebra system to use.
-    'ginac' is the more powerful solver however it is accessed through
-    swiginac which isn't the easiest thing to install.
-
-    """
-    if cas.lower() == 'sympy':
-        from sympy import solve, Matrix, Symbol, Add
-        from sympy.core.numbers import Zero, Real
-    elif cas.lower() == 'ginac':
-        #Symbolic.Matrix might be slow to index so we might use
-        #the swiginac matrix or numpy.matrix instead
-        from Symbolic import Symbol, Matrix, Expr
-                
-        
-    elements = ('c', 'h', 'o', 'n', 'p', 's', 'z') #z is for generics
-    element_dict = dict(zip(elements, range(len(elements))))
-    element_matrix = dok_matrix((len(elements), len(cobra_model.metabolites)))
-    if db_cursor and me_matrix:
-        #Used for current incarnation of ME matrix.  
-        known_compositions = set()
-        #1. Start off by getting known molecular compositions
-        db_cursor.execute('Select Id, (c, h, o, n, p, s) from metabolite')
-        metabolite_compositions = dict(db_cursor.fetchall())
-        for the_metabolite, the_composition in metabolite_compositions.items():
-            if the_metabolite in cobra_model.metabolites:
-                known_compositions.add(the_metabolite)
-                the_column = cobra_model.metabolites.index(the_metabolite)
-                the_composition = eval(the_composition)
-                element_matrix.update(dict([((i, the_column), the_composition[i])
-                                            for i in range(len(the_composition))]))
-
-        #2. Identify the reactions that produce generics and set the stoichiometry
-        #to zero to deal with mass imbalances.
-        #
-        #This isn't a problem any more as the dummy reactions are not added
-        #until after balancing.
-        generic_production_reactions = dict([(x, cobra_model.reactions.index(x))
-                                             for x in cobra_model.reactions\
-                                             if x.startswith('generic_rename')])
-        #collect all of the generic_X metabolite Ids and then set masses to 1 Z
-        db_cursor.execute('SELECT FU_ID_Generic from Generic_FU')
-        generic_metabolites = [x[0] for x in db_cursor.fetchall()]
-
-        known_compositions.update(generic_metabolites)
-        element_matrix.update(dict([((elements.index('z'),
-                                      cobra_model.metabolites.index(x)), 1.)
-                                    for x in generic_metabolites]))
-        #Remove the generic production reactions
-        for column_index in generic_production_reactions.values():
-            the_column = cobra_model._S[:, column_index]
-            for row_index in the_column.nonzero()[0]:
-                the_column[row_index, 0] = 0
-
-        #3. Remove demand reactions.
-        #This isn't a problem any more as the demand reactions are not added
-        #until after balancing.
-        demand_indices = [cobra_model.reactions.index(x)
-                          for x in cobra_model.reactions if 'demand' in x]
-        for column_index in demand_indices:
-            the_column = cobra_model._S[:, column_index]
-            for row_index in the_column.nonzero()[0]:
-                the_column[row_index, 0] = 0
-        #4. Calculate molecular formula for transcripts and peptides.  This isn't
-        #necessary, but will probably make solving the problem easier.
-
-        #known_compositions.update(transcripts and peptides)
-
-        #5. For all metabolites not in known compositions.  It will be
-        #necessary to add a symbolic value to the matrix for each element
-        #excluding z, i.e.  the_metabolite_(c, h, o, n, p, s, z). 
-        metabolite_dict = dict(zip(cobra_model.metabolites,
-                                   range(len(cobra_model.metabolites))))
-        [metabolite_dict.pop(k)
-         for k in known_compositions]
-        #If there are any metabolites without compositions build a symbolic matrix
-        if len(metabolite_dict) > 0:
-            #Build the symbolic elemental composition matrix
-            if cas.lower() == 'sympy':
-                element_matrix = Matrix(element_matrix.todense())
-            elif cas.lower() == 'ginac':
-                element_matrix = Matrix(element_matrix.todense().tolist())
-            #Now populate with symbols for the_metabolites not in known_compositions
-            for the_metabolite, metabolite_index in metabolite_dict.items():
-                for the_element, element_index in element_dict.items():
-                    element_matrix[element_index,
-                                   metabolite_index] = Symbol('%s_%s'%(the_metabolite,
-                                                                       the_element))
-    else:
-        print 'Not yet implemented for anything other than ME'
-
-    return({'elements': element_dict,
-            'matrix': element_matrix})
-
-
-
-
-
-def create_balancing_problem(cobra_model, element_vector,
-                             system_type='equations',
-                             cas='sympy', print_unbalanced=False):
-    """Create a symbolic linear algebra problem to solve for unknown metabolite
-    compositions in cobra_model.
-
-      element_vector x cobra_model._S.T = 0
-
-      Note: The problems are typically too big to balance for all elements
-      at once, thus the vector should only deal with one of the chemical elements.
-
-    cobra_model: A cobra.Model object
-
-    element_vector: A sympy.Matrix with the element counts corresponding to each
-    metabolite in cobra_model.reactions.
-
-    return_type: 'equations' or 'matrix'.  Constructing equations is faster, but
-    for specific operations a matrix may be desired.  If 'matrix' then the
-    unbalanced reactions are not currently returned.
-
-    cas: 'sympy' or 'ginac'.  Specifies the computer algebra system to use.
-    'ginac' is the more powerful solver however it is accessed through
-    swiginac which isn't the easiest thing to install.
-
-    print_unbalanced: Boolean.  Indicates whether to print the unbalanced reactions.
-    
-    """
-    if cas.lower() == 'sympy':
-        from sympy import solve, Matrix, Symbol, Add
-        from sympy.core.numbers import Zero, Real
-    elif cas.lower() == 'ginac':
-        from Symbolic import Symbol, Matrix, Expr
-        from swiginac import lsolve, matrix, symbol
-        from swiginac import add as Add
-    #  If multiple solutions are available, then we'll have to make
-    # sure that the sum of each column in element_matrix is >= 1
-    #
-    #Now deal with multiplying the two matrices.
-    unbalanced_dict = {}
-    variable_set = set()
-    if system_type == 'equations':
-        s_matrix_transpose = cobra_model._S.T #Row access is faster for sparse arrays
-        #Multiplying symbolic_element_matrix by s_matrix is not an option, due to
-        #memory and speed issues.
-        the_system = []
-
-
-        for i in range(s_matrix_transpose.shape[0]):
-            the_column = s_matrix_transpose[i, :] 
-            #This is faster than multiplying symbolic_element_matrix by
-            #the_column by about 3-fold
-            the_indices = the_column.nonzero()[1]
-            the_factors = [float(the_column[0, x])
-                           for x in the_indices]
-            if cas.lower() == 'sympy':
-                the_variables = [element_vector[0,j]
-                                 for j in the_indices]
-            elif cas.lower() == 'ginac':
-                the_variables = [element_vector[j]
-                                 for j in the_indices]
-
-            the_equation = reduce(lambda x,y: x + y,
-                                   map(lambda x, y: x*y, the_factors,
-                                       the_variables))
-            if cas.lower() == 'sympy':
-                #this can probably be streamlined for the different CASes
-                if isinstance(the_equation, Add):
-                    the_system.append(the_equation)
-                    the_atoms = list(the_equation.atoms())
-                    [variable_set.add(x) for x in the_atoms
-                     if isinstance(x, Symbol)]
-                elif not isinstance(the_equation, Zero) and \
-                         the_equation != 0:
-                    unbalanced_dict.update({cobra_model.reactions[i]:
-                                            the_equation})
-                    if print_unbalanced:
-                        print 'Unbalanced Reaction ' +\
-                              '%s: Element %s is %s'%(cobra_model.reactions[i],
-                                                      the_element,
-                                                      repr(the_equation))
-            elif cas.lower() == 'ginac':
-                if isinstance(the_equation.data, Add):
-                    the_system.append(the_equation)
-                    [variable_set.add(x) for x in the_variables
-                     if isinstance(x, Expr)]
-                elif the_equation.eval() != 0:
-                    unbalanced_dict.update({cobra_model.reactions[i]:
-                                            the_equation})
-                    if print_unbalanced:
-                        print 'Unbalanced Reaction ' +\
-                              '%s: Element %s is %s'%(cobra_model.reactions[i],
-                                                      the_element,
-                                                      repr(the_equation))
-
-    else:
-        print 'Warning this may take 10 Gb RAM and an hour'
-        the_system = element_vector * cobra_model._S.todense()
-
-    return({'variables': variable_set,
-            'equations': the_system,
-            'unbalanced': unbalanced_dict})
-
-
-def solve_balance_problem(the_equations, the_variables):
-    """Solves a systems of linear equations for the variables. Using
-    sympy.
-
-    the_equations: A list of sympy.Add equations.
-
-    the_variables: A list of sympy variables (Symbols, Zero, Real, One, ...)
-    
-    """
-    if cas.lower() == 'sympy':
-        from sympy import solve
-    elif cas.lower() == 'ginac':
-        from swiginac import lsolve as solve
-    the_solution = solve(the_equations, the_variables)    
-    return the_solution
-
-
-if __name__ == '__main__':
-    from sys import argv
-    from os.path import lexists
-    from time import time
-    ## if not len(argv) == 3:
-    ##     print 'Need to call the script with the model file name and element'
-    ##     print 'e.g. python balance.py cobra_model.pickle c'
-    ## model_file = argv[1]
-    ## the_element = argv[2]
-    cas='ginac'
-    me_matrix = True
-    the_element = 'c'
-    model_file = '/Users/danie/e/builds/cobra_model.pickle'
-    system_type = 'equations'
-    element_file = '%s.elements.%s'%(model_file, cas)
-    problem_file = '%s.%s_problem.%s'%(model_file,
-                                       the_element,
-                                       cas)
-    solution_file = '%s.%s_solution.%s'%(model_file,
-                                       the_element,
-                                       cas)
-
-
-    with open(model_file) as in_file:
-        cobra_model = load(in_file)
-
-    if lexists(element_file) and not lexists(problem_file):
-        #Only load the element file if a problem file does not
-        #already exist to reduce memory usage.
-        with open(element_file) as in_file:
-            the_elements = load(in_file)
-    elif not lexists(problem_file):
-        #Only build the elements if the problem file doesn't exist
-        import pgdb as PgSQL
-        start_time = time()
-        db_con = PgSQL.connect(database='cobra')
-        db_cursor = db_con.cursor()
-        the_genus = 'thermotoga'
-        db_cursor.execute('Set search_path to ' + the_genus)
-        print 'Building element matrix'
-        start_time = time()
-        the_elements = create_element_matrix(cobra_model, db_cursor=db_cursor,
-                                             me_matrix=True, cas=cas)
-        if cas.lower() != 'ginac':
-            #Can't pickle PySwigObjects
-            with open(element_file, 'w') as out_file:
-                dump(the_elements, out_file) 
-        print 'Element matrix %s created in %f minutes'%(element_file,
-                                                       (time()-start_time)/60)
-    element_index = the_elements['elements'][the_element]
-    element_vector = the_elements['matrix'][element_index, :]
-    print 'This problem is symbolic and may take some time to solve'
-    if not lexists(problem_file):
-        print 'Constructing the problem for element %s. Restart to solve the problem'%the_element
-        print 'This process can take 1-100 minutes depending on model size'
-        start_time = time()
-        
-        the_problem = create_balancing_problem(cobra_model,
-                                               element_vector,
-                                               system_type=system_type,
-                                               cas=cas,
-                                               print_unbalanced=True)
-        if cas.lower() != 'ginac':
-            print 'Rerun the script to solve the problem'
-            with open(problem_file, 'w') as out_file:
-                dump(the_problem, out_file)
-        print 'Problem file %s created in %1.2f minutes.'%(problem_file,
-                                                            (time() - start_time) / 60)
-    
-    ## else:
-    ##     with open(problem_file) as in_file:
-    ##         the_problem = load(in_file)
-
-    ##     print 'Solving the problem %s'%problem_file
-    ##     print 'This may take some time'
-    ##     start_time = time()
-    ##     the_solution = solve_balance_problem(the_problem['equations'],
-    ##                                          the_problem['variables'])
-
-    ##     with open(solution_file, 'w') as out_file:
-    ##         dump(the_solution, out_file)
-    ##     print 'Problem solved. %s created in %1.2f minutes.'%(solution_file,
-    ##                                                           (time() - start_time) / 60) 
-    
-#solve nonsymbolic problems
-        ## element_matrix = the_elements['matrix'].tocsr()
-        ## elements = the_elements['elements']
-        ## reaction_matrix = cobra_model._S.T.tocsr()
-        ## print 'Not a symbolic problem'
-        ## the_balance = element_matrix * reaction_matrix
-        ## the_balance = the_balance.tolil()
-        ## for e, i in elements:
-        ##     print '%s generated by system: %f'%(e, the_balance[i,:].sum())
diff --git a/cobra/oven/danielhyduke/construction/omics_guided.py b/cobra/oven/danielhyduke/construction/omics_guided.py
deleted file mode 100644
index c6d4ca1..0000000
--- a/cobra/oven/danielhyduke/construction/omics_guided.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#cobra.manipulation.omics_guided.py
-#The functions for omics_guided tailoring will be kept here.
-def tailor_model(cobra_model, the_method='GIMME', data_type='mRNA', data_kind='log_ratio',
-                 solver='glpk', the_problem='return' ):
-    """
-
-    the_method: Type of tailoring to employ.  GIMME or shlomi.
-    data_type: 'mRNA', 'protein', 'metabolite', ...
-    data_kind: 'p-value','log_ratio': assumed vs control, 'intensity'
-    solver: 'glpk' or 'gurobi'
-    
-    
-    """
-    cobra_model = cobra_model.copy()
-    print 'Under development'
-    return
-
-#function [reactionActivity,reactionActivityIrrev,model2gimme,gimmeSolution] = solveGimme(model,objectiveCol,expressionCol,cutoff)
diff --git a/cobra/oven/danielhyduke/general/__init__.py b/cobra/oven/danielhyduke/general/__init__.py
deleted file mode 100644
index c82945a..0000000
--- a/cobra/oven/danielhyduke/general/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from arrays import *
diff --git a/cobra/oven/danielhyduke/general/arrays.py b/cobra/oven/danielhyduke/general/arrays.py
deleted file mode 100644
index 7dc6274..0000000
--- a/cobra/oven/danielhyduke/general/arrays.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from numpy import ndarray
-class ResultsArray(ndarray):
-    """A primitive wrapper to allow accessing numpy.ndarrays via
-    named rows and columns.  The ResultsArray.row_names and
-    column_names must be assigned after the object is created.
-
-    The names will not carry over for any operations.
-
-    TODO: Finish the implementation
-
-    """
-    def __init__(self, shape, row_names=None, column_names=None):
-        ndarray.__init__(shape)
-        if row_names:
-            self.row_names = row_names
-        else:
-            self.row_names = range(shape[0])
-        if column_names:
-            self.column_names = column_names
-        else:
-            column_names = range(shape[1])
-    def get(self, row_name=None, column_name=None):
-        if row_name:
-            the_row = self.row_names.index(row_name)
-        if column_name:
-            the_column = self.column_names.index(column_name)
-        if row_name and column_name:
-            return self[the_row, the_column]
-        if not row_name:
-            return self[:, the_column]
-        if not column_name:
-                return self[the_row, :]
diff --git a/cobra/oven/danielhyduke/jython/README b/cobra/oven/danielhyduke/jython/README
deleted file mode 100644
index d4f214f..0000000
--- a/cobra/oven/danielhyduke/jython/README
+++ /dev/null
@@ -1 +0,0 @@
-Section dedicated to creating scipy/numpy for jython interface using cern.colt.  This will not be ready any time soon.
diff --git a/cobra/oven/danielhyduke/jython/__init__.py b/cobra/oven/danielhyduke/jython/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/cobra/oven/danielhyduke/jython/numpy/README b/cobra/oven/danielhyduke/jython/numpy/README
deleted file mode 100644
index 6d358ed..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/README
+++ /dev/null
@@ -1 +0,0 @@
-In the future, we will be working on a numpy for java implementation that uses cern.colt matrices as the backend and provides a java interface that mirrors numpy.
diff --git a/cobra/oven/danielhyduke/jython/numpy/__init__.py b/cobra/oven/danielhyduke/jython/numpy/__init__.py
deleted file mode 100644
index 5caa494..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from cern.colt.matrix.impl import DenseDoubleMatrix2D as ndarray
-from cern.colt.matrix.impl import SparseDoubleMatrix2D as sdarray
-from core import *
-#import core
-#from core import *
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/Matrix.py b/cobra/oven/danielhyduke/jython/numpy/core/Matrix.py
deleted file mode 100644
index fc70437..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/Matrix.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#cobra.numjy.Matrix.py
-#Basic matrix class that is going to be used to mimic numpy.ndarray
-#capabilities.
-#
-#Derived from Simon Galbraith's ncajava Matrix.py file
-import java, javax, jarray
-from copy import deepcopy
-from cern.colt.matrix import DoubleMatrix2D
-from cern.colt.matrix.DoubleFactory2D import dense,sparse;
-from cern.colt.matrix.impl import DenseDoubleMatrix2D as ndarray
-from cern.colt.matrix.impl import SparseDoubleMatrix2D as sdarray
-from cern.colt.matrix.linalg import Algebra;
-from org.python.core.exceptions import ValueError as PyValueException;
-from org.python.core import PyString,PySlice,PySequence,PyList;
-
-class Matrix(javax.swing.table.AbstractTableModel):
-    _M = None;   
-    _name = 'data matrix'  
-    varname = ''
-    column_names = []
-    row_names = []
-           
-    def __init__(self,M=None,N=None,v=None,sparse=None):
-
-        """
-        M is the number of rows
-        N is the number of columns
-        v is the default value
-        """
-        if isinstance(M,DoubleMatrix2D):
-            self._M = M.copy();  
-        elif (isinstance(M,int) and isinstance(N,int)):
-            if sparse:
-                F = sparse
-            else:
-                F = dense;
-            if v is None:
-                self._M = F.make(M,N,0);
-            elif isinstance(v,int): 
-                self._M = F.make(M,N,v);
-            elif isinstance(v,PyList):
-                self._M = F.make(jarray.array(v,'d'),1)
-            elif isinstance(v,PyString):
-                self._M = F.random(M,N);
-            else:
-                if sparse:
-                    self._M = SparseDoubleMatrix2D(v)
-                else:
-                    self._M = ndarray(v)
-        self.shape = (self._M.rows(),self._M.columns())
-
-
-    def __copy__(self):
-        r = new.instance(self.__class__, self.__dict__.copy() )
-        r._M = self._M.copy();
-        print "in copy"
-        return r
-    
-    def __sub__(A,B):
-        
-        [ar,ac]=size(A);        
-        C = Matrix(ar,ac,0);
-        for i in range(1,ar):
-            for j in range(1,ac):
-                C[i,j]=A[i,j]-B[i,j]
-        return C;
-    
-    def __mul__(A,B):  
-        
-        # need to check types and multiple based on them..     
-        try:
-            F = Algebra();
-            C=(F.mult(A._M,B._M));
-        except:
-            raise PyValueException, "Inner dimension mismatch in matrix multiply.";
-            return None;
-        return Matrix(C) 
-    def __div__(A,B):
-        #print size(A)
-
-        try:
-            F = Algebra();
-            R = F.solve(A._M,B._M);
-            return R;
-        except (java.lang.IllegalArgumentException) , e :
-            # check the error class types according to the matrix class so we can intelligently report the error.
-            print e.getMessage();
-            return None;        
-        
-    
-    def __repr__(self):
-        return self._M.toString();
-            
-    def __str__(self):
-        return self._M.toString();
-
-    def __sz__(self):        
-        if isinstance(self,Matrix):
-            x=self._M.rows();
-            y=self._M.columns();
-            return (x,y);
-        else:
-            raise PyValueException, "Argument must be a matrix.";   
-           
-    def __setitem__(self,idx,v):
-
-        if v is None:
-            print idx
-            raise PyValueException, "v is none"
-        
-        if isinstance(v,float): 
-            self._M.set(idx[0],idx[1],v);
-            return;
-        
-        Y=idx[1];
-        X=idx[0];
-            
-        if isinstance(X,PyList):
-            X=map(lambda x: x, X)
-        elif isinstance(X,PySlice):
-            if X.start == None:
-               X=range(0,self._M.rows())
-        elif isinstance(X,int):
-            X=[X];
-        
-        if isinstance(Y,PyList):
-            Y=map(lambda x: x,Y);
-        elif isinstance(Y,PySlice):
-            if Y.start == None:
-               Y=range(0,self._M.cols())        
-        elif isinstance(Y,int):
-            Y=[Y];
-
-        order=0;
-        if len(X)>len(Y):
-            order=1;
-            
-        #print "the order is " , order    
-        if order==0:
-            y=1
-            for q in Y:
-                    x=1
-                    for z in X: 
-         #               print z,q,x,y,v
-                        self._M.set(z,q,v[x,y]);
-                        x+=1;
-                    y+=1;
-        else:
-            x=1
-            for z in X:                    
-                    y=1;
-                    for q in Y:             
-                        self._M.set(z,q,v[x,y]);
-                        y+=1;
-                    x+=1;
-            
-                
-    def __getslice__(self, i, j):
-
-        if i.start != None:
-            x=range(i.start,i.stop);
-        else:
-            x=range(0,self._M.rows())
-        if j.start !=None:    
-            y=range(j.start,j.stop)
-        else:
-            y=range(0,self._M.columns())
-        
-        return Matrix(self._M.viewSelection(x,y));
-
-         
-    def __getitem__(self,idx):
-        x=idx[0];
-        y=idx[1];
-        if x<0 or y<0:
-            raise PyValueException, "Index must be positive number";     
-          # this will fail on pyslice
-        
-        if isinstance(x,PySlice):
-            if x.start != None:
-               x=range(x.start,x.stop);
-            else:
-               x=range(0,self._M.rows())                   
-        elif isinstance(x,int):
-            x=x;
-            x=[x]; 
-        elif isinstance(x,PyList):
-            x=map(lambda x: x, x)
-       
-        if isinstance(y,int):
-               y=y;
-               y=[y];
-        elif isinstance(y,PySlice):
-            if y.start != None:
-                y=range(y.start,y.stop);
-            else:
-                y=range(0,self._M.columns())         
-        elif isinstance(y,PySlice):
-            if y.start !=None:    
-                y=range(y.start,y.stop)
-            else:
-                y=range(0,self._M.columns())
-        elif isinstance(y,PyList):
-            y=map(lambda x: x, y)
-
-        
-        if len(x)<2 and len(y)<2:
-            r = self._M.getQuick(x[0],y[0]);
-            return float(r)  # this is a specific element
-        else:
-            return Matrix(self._M.viewSelection(x,y));
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/__init__.py b/cobra/oven/danielhyduke/jython/numpy/core/__init__.py
deleted file mode 100644
index 347a592..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from multiarray import *
-from core import *
-#from numeric import *
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/core.py b/cobra/oven/danielhyduke/jython/numpy/core/core.py
deleted file mode 100755
index fb60394..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/core.py
+++ /dev/null
@@ -1,582 +0,0 @@
-# (c) Simon J Galbraith 2007.
-# Daniel Hyduke 2010
-import java, jarray
-from copy import deepcopy
-from cern.colt.list import IntArrayList, DoubleArrayList
-from cern.colt.matrix import DoubleFactory2D
-from cern.colt.matrix.impl import DenseDoubleMatrix2D
-from cern.colt.matrix.impl import SparseDoubleMatrix2D
-from cern.colt.matrix.linalg import Algebra, EigenvalueDecomposition, LUDecomposition;
-from org.python.core.exceptions import ValueError as PyValueException;
-from cern.colt.matrix.doublealgo.Statistic import covariance, correlation;
-from cern.jet.math.Functions import abs as cern_abs
-from multiarray import ndarray, array
-def mean(A):
-    """Calculate the mean of Matrix object A
-
-    """
-    [r,c]=size(A)
-    s=0
-    for j in range(1,c):
-        s = s+A[r,j]
-    return s/c
-
-def sum(A):
-    """Calculate the sum of Matrix object A
-
-    """
-    return A._M.zSum()
-
-
-def norm(A,ntype=None):
-    F = Algebra();
-    if ntype=='fro':
-        r=F.normF(A._M);
-    elif ntype == 2:
-        r=F.norm2(A._M);
-    else:
-        r=F.norm2(A._M);
-    return r;
-
-def rank(A):
-    
-    if isinstance(A,ndarray):
-        F = Algebra();
-        r=F.rank(A._M);
-        return int(r);
-    else:
-        raise PyValueException, "Rank function can only be called on matrix objects"
-
-def cond(A):
-    F = Algebra();
-    return F.cond(A._M);
-
-def size(A):
-    return A.__sz__();
-
-def transpose(A):
-    F = Algebra();
-    if isinstance(A,float):
-        return A;
-    else:
-        return ndarray(F.transpose(A._M));
-
-def inverse(A):
-    F = Algebra();
-    x=F.inverse(A._M);
-    return ndarray(x)
-
-
-def eig(A):
-        # check that _M is square
-    try:
-        E = EigenvalueDecomposition(A._M);
-        U = E.getV();
-        eigs = E.getD();
-    except PyValueException, e:
-        print e;
-        raise PyValueException,"Error in eig, check warnings()";  
-    return [ndarray(eigs),ndarray(U)];
-
-def solve(A, B):
-    F = Algebra();
-    if isinstance(A, ndarray) and isinstance(B, float):
-            return F.solve(A._M, B);
-    elif isinstance(B, ndarray) and  isinstance(A, float):
-            return F.solve(A, B._M);
-    elif isinstance(A,ndarray) and isinstance(B, ndarray):
-            return ndarray(F.solve(A._M, B._M))
-    else:
-        return A / B
-
-def solve_transpose(A, B):
-    F = Algebra();
-    if isinstance(A, ndarray) and isinstance(B, float):
-            return F.solveTranspose(A._M,B);
-    elif isinstance(B, ndarray) and  isinstance(A, float):
-            return F.solveTranspose(A, B._M);
-    elif isinstance(A, ndarray) and isinstance(B, ndarray):
-            return ndarray(F.solveTranspose(A._M, B._M));
-    else:
-        return A / B
-
-def solve_LR(A, B):
-    T = A._M.copy()
-    F = LUDecomposition(T);
-    if isinstance(A, ndarray) and isinstance(B, float):
-            return F.solve(B);
-    elif isinstance(A, ndarray) and isinstance(B, ndarray):
-        C = F.solve(B._M);
-        return ndarray(C)
-
-def cov(A):
-    return ndarray(covariance(A._M))
-
-def cor(A):  # handle multidimensional matrix case
-    B = cov(A);
-    return  ndarray(correlation(B._M))
-
-
-def abs(A):
-    F = cern_abs
-    if isinstance(A,float):
-        return java.lang.Math.abs(A)
-    else:
-        X = A._M.assign(F)
-        return ndarray(X);
-
-def svd(A):
-    X = SingularValueDecomposition(A._M)
-    u = X.getU()
-    v = X.getV()
-    e = X.getS()
-    return [ndarray(u), ndarray(e), ndarray(v)]
-
-#TODO:
-#Make sure all of the functions below are are defined.  To know
-#how they should operate, look at the docstring:
-# pydoc numpy.the_function
-#
-# or in ipython:
-# from numpy import *
-# ?the_function
-#
-#
-#TODO: These are the java mappings to the type.  If there's any
-#difficulties then check the cern.colt data types
-int32 = int
-int64 = long
-
-def ones(shape, dtype=float, order='C'):
-    """Return a new array of given shape and type, filled with ones.
-    
-     
-    See Also
-    --------
-    zeros
-    
-    Examples
-    --------
-    >>> numjy.ones(5)
-    array([ 1.,  1.,  1.,  1.,  1.])
-    
-    >>> numjy.ones((5,), dtype=numjy.int)
-    array([1, 1, 1, 1, 1])
-    
-    >>> numjy.ones((2, 1))
-    array([[ 1.],
-           [ 1.]])
-    
-    >>> s = (2,2)
-    >>> numjy.ones(s)
-    array([[ 1.,  1.],
-           [ 1.,  1.]])"""
-    return(ndarray(shape[0], shape[1], 1))
-
-def sign(x):
-    """ sign(x[, out])
-    
-    Returns an element-wise indication of the sign of a number.
-    
-    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
-    
-    Parameters
-    ----------
-    x : array_like
-      Input values.
-    
-    Returns
-    -------
-    y : ndarray
-      The sign of `x`.
-    
-    Examples
-    --------
-    >>> numjy.sign([-5., 4.5])
-    array([-1.,  1.])
-    >>> numjy.sign(0)
-    0
- 
-    """
-    def sign_int(a_number):
-        if a_number < 0:
-            return(-1)
-        elif a_number == 0:
-            return(0)
-        else:
-            return(1)
-
-    if hasattr(x, '__iter__' ) or isinstance(x, ndarray):
-        return_value = array([map(sign_int, list(array(x)._M.toArray()))])
-    else:
-        #In the case the input is just a number return the sign of the number.
-        return_value = sign_int(x)
-    return(return_value)       
-
-
-def vstack(tup):
-    """     Stack arrays in sequence vertically (row wise).
-    
-    Take a sequence of arrays and stack them vertically to make a single
-    array. Rebuild arrays divided by `vsplit`.
-    Parameters
-    ----------
-    tup : sequence of ndarrays
-        Tuple containing arrays to be stacked. The arrays must have the same
-        shape along all but the first axis.
-    Returns
-    -------
-    stacked : ndarray
-        The array formed by stacking the given arrays.
-    See Also
-    --------
-    hstack : Stack arrays in sequence horizontally (column wise).
-    dstack : Stack arrays in sequence depth wise (along third dimension).
-    concatenate : Join a sequence of arrays together.
-    vsplit : Split array into a list of multiple sub-arrays vertically.
-    
-    
-    Notes
-    -----
-    Equivalent to ``np.concatenate(tup, axis=0)``
-    
-    Examples
-    --------
-    >>> a = np.array([1, 2, 3])
-    >>> b = np.array([2, 3, 4])
-    >>> np.vstack((a,b))
-    array([[1, 2, 3],
-           [2, 3, 4]])
-    
-    >>> a = np.array([[1], [2], [3]])
-    >>> b = np.array([[2], [3], [4]])
-    >>> np.vstack((a,b))
-    array([[1],
-           [2],
-           [3],
-           [2],
-           [3],
-           [4]])
-
-           """
-    if isinstance(tup[0], sdarray):
-        matrix_factory = DoubleFactory2D.sparse
-    else:
-        #Allow for the case that python lists or tuples are fed to the function
-        tup = map(array, tup)
-        matrix_factory = DoubleFactory2D.dense
-
-    stacked_matrix = tup[0]._M
-    for the_array in tup[1:]:
-        stacked_matrix = matrix_factory.appendRows(stacked_matrix, the_array._M)
-    return(ndarray(stacked_matrix))
-
-
-def hstack(tup):
-    """
-    Stack arrays in sequence horizontally (column wise).
-
-    Take a sequence of arrays and stack them horizontally to make a single array. Rebuild arrays divided by hsplit.
-
-    Parameters:
-    tup : sequence of ndarrays
-    All arrays must have the same shape along all but the second axis.
-    Returns:
-    stacked : ndarray
-    The array formed by stacking the given arrays.
-    See also
-    vstack
-    Stack arrays in sequence vertically (row wise).
-    dstack
-    Stack arrays in sequence depth wise (along third axis).
-    concatenate
-    Join a sequence of arrays together.
-    hsplit
-    Split array along second axis.
-    Notes
-
-    Equivalent to np.concatenate(tup, axis=1)
-
-    Examples
-
-    >>> a = np.array((1,2,3))
-    >>> b = np.array((2,3,4))
-    >>> np.hstack((a,b))
-    array([1, 2, 3, 2, 3, 4])
-    >>> a = np.array([[1],[2],[3]])
-    >>> b = np.array([[2],[3],[4]])
-    >>> np.hstack((a,b))
-    array([[1, 2],
-           [2, 3],
-           [3, 4]])
-    """
-
-    if isinstance(tup[0], sdarray):
-        matrix_factory = DoubleFactory2D.sparse
-    else:
-        tup = map(array, tup)
-        matrix_factory = DoubleFactory2D.dense
-
-    hstacked_matrix = tup[0]._M
-    for the_array in tup[1:]:
-        hstacked_matrix = matrix_factory.appendColumns(hstacked_matrix, the_array._M)
-    return(ndarray(hstacked_matrix))
-
-
-def where (condition, x=None, y=None):
-    """
-    Return a masked array with elements from x or y, depending on condition.
-
-    Returns a masked array, shaped like condition, where the elements
-    are from `x` when `condition` is True, and from `y` otherwise.
-    If neither `x` nor `y` are given, the function returns a tuple of
-    indices where `condition` is True (the result of
-    ``condition.nonzero()``).
-
-    Parameters
-    ----------
-    condition : array_like, bool
-        The condition to meet. For each True element, yield the corresponding
-        element from `x`, otherwise from `y`.
-    x, y : array_like, optional
-        Values from which to choose. `x` and `y` need to have the same shape
-        as condition, or be broadcast-able to that shape.
-
-    Returns
-    -------
-    out : MaskedArray or tuple of ndarrays
-        The resulting masked array if `x` and `y` were given, otherwise
-        the result of ``condition.nonzero()``.
-
-    See Also
-    --------
-    numpy.where : Equivalent function in the top-level NumPy module.
-
-    Examples
-    --------
-    >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
-    ...                                                    [1, 0, 1],
-    ...                                                    [0, 1, 0]])
-    >>> print x
-    [[0.0 -- 2.0]
-     [-- 4.0 --]
-     [6.0 -- 8.0]]
-    >>> np.ma.where(x > 5)    # return the indices where x > 5
-    (array([2, 2]), array([0, 2]))
-
-    >>> print np.ma.where(x > 5, x, -3.1416)
-    [[-3.1416 -- -3.1416]
-     [-- -3.1416 --]
-     [6.0 -- 8.0]]
-
-    """
-
-##     if x is None and y is None:
-##         return filled(condition, 0).nonzero()
-##     elif x is None or y is None:
-##         raise ValueError, "Either both or neither x and y should be given."
-##     # Get the condition ...............
-##     fc = filled(condition, 0).astype(MaskType)
-##     notfc = np.logical_not(fc)
-##     # Get the data ......................................
-##     xv = getdata(x)
-##     yv = getdata(y)
-##     if x is masked:
-##         ndtype = yv.dtype
-##     elif y is masked:
-##         ndtype = xv.dtype
-##     else:
-##         ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
-##     # Construct an empty array and fill it
-##     d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
-##     _data = d._data
-##     np.putmask(_data, fc, xv.astype(ndtype))
-##     np.putmask(_data, notfc, yv.astype(ndtype))
-##     # Create an empty mask and fill it
-##     _mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
-##     np.putmask(_mask, fc, getmask(x))
-##     np.putmask(_mask, notfc, getmask(y))
-##     _mask |= getmaskarray(condition)
-##     if not _mask.any():
-##         d._mask = nomask
-##     return d
-
-
-
-
-
-
-
-
-## def where(condition  , tup = 'blank', arg3 = 'blank'):
-
-
-
-
-
-
-
-
-##     def conditioncheck(x):
-##         if x == 'True':
-##             return(1)
-##         else:
-##             return(0)
-
-##     if isinstance(condition, bool):
-##         truth_matrix = zeros(z.columns(), z.rows())
-##         for i in xrange(truth_matrix.rows):
-##             for j in xrange(truth_matrix.columns):
-##                 truth_matrix.set(i,j, conditioncheck(z.get(i,j)))
-##     else:
-##         truth_matrix = zeros((len(condition), len(condition[0])))._M
-##         for i in xrange(truth_matrix.rows()):
-##             for j in xrange(truth_matrix.columns()):
-##                 truth_matrix.set(i,j, conditioncheck(condition[i][j]))
-
-##     if arg3 == 'blank':
-##         if tup == 'blank':
-##                 row_list = IntArrayList()
-##                 column_list = IntArrayList()
-##                 coordinate_list = DoubleArrayList()
-##                 truth_matrix.getNonZeros(row_list, column_list, coordinate_list)
-##                 return( (array(row_list), array(column_list)) )
-##         else:
-##             value_matrix = list(tup)
-##             for i, x in enumerate(value_matrix):
-##                 if isinstance(x, int):
-##                     matfac = DoubleFactory2D.dense
-##                     matrix = matfac.make(truth_matrix.rows(), truth_matrix.columns(), x)
-##                     value_matrix[i] = matrix
-##                 else:
-##                     value_matrix[i] = x._M
-##             return_array = zeros((truth_matrix.rows(), truth_matrix.columns()))._M
-##             for i in xrange(truth_matrix.rows()):
-##                 for j in xrange(truth_matrix.columns()):
-##                     if truth_matrix.get(i,j) == 1:
-##                         return_array.set(i,j, value_matrix[0].get(i,j))
-##                     else:
-##                         return_array.set(i,j, value_matrix[1].get(i,j))
-##     else:
-##         value_matrix = [ tup , arg3 ]
-##         for i, x in enumerate(value_matrix):
-##             if isinstance(x, int):
-##                 matfac = DoubleFactory2D.dense
-##                 matrix = matfac.make(truth_matrix.rows(), truth_matrix.columns(), x)
-##                 value_matrix[i] = matrix
-##             else:
-##                 value_matrix[i] = x
-##         return_array = zeros((truth_matrix.rows(), truth_matrix.columns()))._M
-##         for i in xrange(truth_matrix.rows()):
-##             for j in xrange(truth_matrix.columns()):
-##                 if truth_matrix.get(i,j) == 1:
-##                     print(type(value_matrix[0]))
-##                     return_array.set(i,j, value_matrix[0].get(i,j))
-##                 else:
-##                     return_array.set(i,j, value_matrix[1].get(i,j))
-
-
-##     return( return_array )
-
-def nonzero(array):
-    """Return the indices of the elements that are non-zero.
-    Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements
-    in that dimension. The corresponding non-zero values can be obtained with:
-    Parameters:	
-    a : array_like
-    Input array.
-    Returns:	
-    tuple_of_arrays : tuple
-    Indices of elements that are non-zero.
-
-    >>> x = np.eye(3)
-
-    >>> x
-    array([[ 1.,  0.,  0.],
-    [ 0.,  1.,  0.],
-    [ 0.,  0.,  1.]])
-
-    >>> np.nonzero(x)
-    (array([0, 1, 2]), array([0, 1, 2]))
-
-    >>> x[np.nonzero(x)]
-    array([ 1.,  1.,  1.])
-
-    >>> np.transpose(np.nonzero(x))
-    array([[0, 0],
-       [1, 1],
-       [2, 2]])"""
-    
-    rowList = IntArrayList()
-    columnList = IntArrayList()
-    coordinateList = DoubleArrayList()
-    array._M.getNonZeros(rowList, columnList, coordinateList)
-#TODO update array function to deal with Int....
-    return(array(rowList), array(columnList))
-
-def repeat(array, repeat_tup, axis = 2):
-    """
-    Repeat elements of an array.
-
-    Parameters:
-    a : array_like
-    Input array.
-    repeats : {int, array of ints}
-    The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.
-    axis : int, optional
-    The axis along which to repeat values. By default, use the flattened input array, and return a flat output array.
-    Returns:
-    repeated_array : ndarray
-    Output array which has the same shape as a, except along the given axis.
-    See also
-    tile
-    Tile an array.
-    Examples
-
-    >>> x = np.array([[1,2],[3,4]])
-    >>> np.repeat(x, 2)
-    array([1, 1, 2, 2, 3, 3, 4, 4])
-    >>> np.repeat(x, 3, axis=1)
-    array([[1, 1, 1, 2, 2, 2],
-           [3, 3, 3, 4, 4, 4]])
-    >>> np.repeat(x, [1, 2], axis=0)
-    array([[1, 2],
-           [3, 4],
-           [3, 4]])
-        """
-    repeat = list(repeat_tup)
-    #TODO: Make sure this can handle sparse matrices as well.  See the vstack function for ideas.
-    matrix_factory = DoubleFactory2D.dense
-    if axis == 1:
-        repeat_matrix = matrix_factory.repeat(array._M.viewPart(0,0,array._M.rows(),1),1,repeat[0])
-        for i in range( array._M.columns())[1:]:
-            repeated_slice =  matrix_factory.repeat(array._M.viewPart(0, i, array._M.rows(), 1) , 1, repeat[i])
-            repeat_matrix = matrix_factory.appendColumns(repeat_matrix, repeated_slice )
-        return(repeat_matrix)
-    elif axis == 0:
-        repeat_matrix = matrix_factory.repeat(array._M.viewPart(0,0,1,array._M.columns()),repeat[0],1)
-        for i in range(array._M.columns())[1:]:   
-            repeat_matrix = matrix_factory.appendRows(repeat_matrix,
-                                                      matrix_factory.repeat(array._M.viewPart(i,0,1,array._M.columns()),
-                                                                            repeat[i], 1))
-            return(repeat_matrix)
-    else:
-        pass
-
-
-
-
-
-#One
-#From scipy sparse.lil_matrix, sparse.hstack, sparse.vstack
-
-def matrix():
-    #This is not pressing.  I updated my cobra modules to use array instead of
-    #matrix so this can be dealt with later.
-    pass
-
-
-
-    
-
-
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/multiarray.py b/cobra/oven/danielhyduke/jython/numpy/core/multiarray.py
deleted file mode 100644
index f781dbf..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/multiarray.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#cobra.numjy.multiarray.py
-#Basic matrix class that is going to be used to mimic numpy.ndarray
-#capabilities.
-#
-#Derived from Simon Galbraith's ncajava Matrix.py file
-
-
-#To get numerictypes to work we just need to have these defined
-# from numpy.core.multiarray import typeinfo, dtype
-
-#TODO: add in the __eq_function
-
-
-__all__ = ['ndarray','array',]
-import java, javax, jarray
-from copy import deepcopy
-from cern.colt.list import IntArrayList, DoubleArrayList
-from cern.colt.matrix import DoubleMatrix2D
-from cern.colt.matrix.DoubleFactory2D import dense,sparse;
-from cern.colt.matrix.impl import DenseDoubleMatrix2D
-from cern.colt.matrix.impl import SparseDoubleMatrix2D
-from cern.colt.matrix.linalg import Algebra;
-from org.python.core.exceptions import ValueError as PyValueException;
-from org.python.core import PyString,PySlice,PySequence,PyList;
-
-class ndarray(javax.swing.table.AbstractTableModel):
-    _M = None;   
-    _name = 'data matrix'  
-    varname = ''
-    column_names = []
-    row_names = []
-           
-    def __init__(self, M=None, N=None, v=None, sparse=None):
-
-        """
-        M is the number of rows
-        N is the number of columns
-        v is the default value
-        """
-        if isinstance(M, DoubleMatrix2D):
-            self._M = M.copy();  
-        elif (isinstance(M, int) and isinstance(N, int)):
-            if sparse:
-                F = sparse
-            else:
-                F = dense;
-            if v is None:
-                self._M = F.make(M, N, 0);
-            elif isinstance(v, int): 
-                self._M = F.make(M, N, v);
-            elif isinstance(v, PyList):
-                self._M = F.make(jarray.array(v, 'd'),1)
-            elif isinstance(v, PyString):
-                self._M = F.random(M, N);
-            else:
-                if sparse:
-                    self._M = SparseDoubleMatrix2D(v)
-                else:
-                    self._M = ndarray(v)
-        self.shape = (self._M.rows(), self._M.columns())
-
-
-    def __copy__(self):
-        r = new.instance(self.__class__, self.__dict__.copy())
-        r._M = self._M.copy();
-        return r
-    
-    def __sub__(A, B):
-        [ar, ac]=size(A);        
-        C = ndarray(ar,ac,0);
-        for i in range(ar):
-            for j in range(ac):
-                C[i, j]=A[i, j]-B[i, j]
-        return C
-    
-    def __mul__(A, B):  
-        # need to check types and multiple based on them..     
-        try:
-            F = Algebra();
-            C=(F.mult(A._M, B._M));
-        except:
-            raise PyValueException, "Inner dimension mismatch in matrix multiply.";
-            return None;
-        return ndarray(C) 
-    def __div__(A, B):
-        try:
-            F = Algebra();
-            R = F.solve(A._M, B._M);
-            return R;
-        except (java.lang.IllegalArgumentException), e:
-            # check the error class types according to the matrix class so we can intelligently report the error.
-            print e.getMessage()
-            return None
-    def __repr__(self):
-        return self._M.toString();
-            
-    def __str__(self):
-        return self._M.toString();
-
-    def __sz__(self):        
-        if isinstance(self, ndarray):
-            x = self._M.rows();
-            y = self._M.columns();
-            return (x, y);
-        else:
-            raise PyValueException, "Argument must be a matrix.";   
-           
-    def __setitem__(self, idx, v):
-        if v is None:
-            print idx
-            raise PyValueException, "v is none"
-        if isinstance(v, float): 
-            self._M.set(idx[0], idx[1], v);
-            return
-        
-        Y = idx[1]
-        X = idx[0]
-        if isinstance(X, PyList):
-            X=map(lambda x: x, X)
-        elif isinstance(X, PySlice):
-            if X.start == None:
-               X=range(0, self._M.rows())
-        elif isinstance(X, int):
-            X = [X]
-        
-        if isinstance(Y, PyList):
-            Y = map(lambda x: x, Y);
-        elif isinstance(Y, PySlice):
-            if Y.start == None:
-               Y = range(0, self._M.cols())        
-        elif isinstance(Y, int):
-            Y = [Y];
-
-        order = 0
-        if len(X) > len(Y):
-            order = 1
-            
-        #print "the order is " , order    
-        if order == 0:
-            y = 1
-            for q in Y:
-                    x = 1
-                    for z in X: 
-         #               print z,q,x,y,v
-                        self._M.set(z, q, v[x, y])
-                        x += 1
-                    y += 1
-        else:
-            x = 1
-            for z in X:                    
-                    y = 1
-                    for q in Y:             
-                        self._M.set(z, q, v[x, y])
-                        y += 1
-                    x += 1
-                
-    def __getslice__(self, i, j):
-        if i.start != None:
-            x = range(i.start, i.stop);
-        else:
-            x = range(0, self._M.rows())
-        if j.start != None:    
-            y = range(j.start, j.stop)
-        else:
-            y = range(0, self._M.columns())
-        
-        return ndarray(self._M.viewSelection(x, y))
-
-         
-    def __getitem__(self, idx):
-        x = idx[0]
-        y = idx[1]
-        if x < 0 or y < 0:
-            raise PyValueException, "Index must be positive number"
-          # this will fail on pyslice
-        
-        if isinstance(x, PySlice):
-            if x.start != None:
-                x = range(x.start, x.stop);
-            else:
-                x = range(0, self._M.rows())                   
-        elif isinstance(x, int):
-            x = x
-            x = [x] 
-        elif isinstance(x, PyList):
-            x = map(lambda x: x, x)
-       
-        if isinstance(y, int):
-               y = y
-               y = [y]
-        elif isinstance(y, PySlice):
-            if y.start != None:
-                y = range(y.start, y.stop)
-            else:
-                y = range(0, self._M.columns())         
-        elif isinstance(y, PySlice):
-            if y.start != None:    
-                y = range(y.start, y.stop)
-            else:
-                y = range(0, self._M.columns())
-        elif isinstance(y, PyList):
-            y = map(lambda x: x, y)
-
-        if len(x) < 2 and len(y) < 2:
-            r = self._M.getQuick(x[0], y[0])
-            return float(r)  # this is a specific element
-        else:
-            return ndarray(self._M.viewSelection(x, y))
-
-
-def array(A, dtype=float, copy=True, subok=False, ndmin=True):
-    """Create an array to mimic the features of the numpy.ndarray
-
-    Parameters
-    ----------
-    A:  An array like object.  Currently a list of lists or tuple
-    of tuples that will be converted into a 2D array.
-
-    dtype: data-type. The default is double.
-
-    copy: makes a deepcopy of the elements in the array if set to True
-
-    order: dummy variable to match numpy.array interface
-
-    subok: dummy variable to match numpy.array interface
-
-    ndmin: dummy variable to match numpy.array interface
-    """
-    if isinstance(A, ndarray):
-        return(A)
-    #BUG: What is the point of this?
-    if isinstance(A, IntArrayList):
-        the_array = ndarray(1, A.size())
-        for x in xrange(A.size()):
-            print(A.get(x))
-            the_array[x] = A.get(x)
-            return(the_array)
-    
-    number_of_rows = len(A)
-    if hasattr(A[0], '__iter__'):
-        number_of_columns = len(A[0])
-    else:
-        number_of_columns = number_of_rows
-        number_of_rows = 1
-        A = [A]
-    the_array = ndarray(number_of_rows, number_of_columns)
-    if number_of_columns > 1 and number_of_rows > 1:
-        the_array.ndim = 2
-    else:
-        the_array.ndim = 1
-    #This can be sped up significantly
-    for i in range(number_of_rows):
-        the_row = A[i]
-        for j in range(number_of_columns):
-            the_array[i, j] = dtype(the_row[j])
-    return(the_array)
-
-def zeros(shape, dtype=float, order='C'):
-    """Return a new array of given shape and type, filled with zeros.
-    
-    Parameters
-    ----------
-    shape : {tuple of ints, int}
-        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
-    dtype : data-type, optional
-        The desired data-type for the array, e.g., `numpy.int8`.  Default is
-        `numpy.float64`.
-    order : {'C', 'F'}, optional
-        Whether to store multidimensional data in C- or Fortran-contiguous
-        (row- or column-wise) order in memory.
-    
-    Returns
-    -------
-    out : ndarray
-        Array of zeros with the given shape, dtype, and order.
-    
-    See Also
-    --------
-    numpy.zeros_like : Return an array of zeros with shape and type of input.
-    numpy.ones_like : Return an array of ones with shape and type of input.
-    numpy.empty_like : Return an empty array with shape and type of input.
-    numpy.ones : Return a new array setting values to one.
-    numpy.empty : Return a new uninitialized array.
-    
-    Examples
-    --------
-    >>> np.zeros(5)
-    array([ 0.,  0.,  0.,  0.,  0.])
-    
-    >>> np.zeros((5,), dtype=numpy.int)
-    array([0, 0, 0, 0, 0])
-    
-    >>> np.zeros((2, 1))
-    array([[ 0.],
-           [ 0.]])
-    
-    >>> s = (2,2)
-    >>> np.zeros(s)
-    array([[ 0.,  0.],
-           [ 0.,  0.]])
-    
-    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')])
-    array([(0, 0), (0, 0)],
-          dtype=[('x', '<i4'), ('y', '<i4')])
-"""
-    return(ndarray(shape[0], shape[1]))
-
-def empty(shape, dtype=float, order='C'):
-    """Because we don't like using unitialized arrays.  We'll just
-    call the zeros function even thought it might be a tad slower
-           """
-    return(zeros(shape, dtype, order))
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/numeric.py b/cobra/oven/danielhyduke/jython/numpy/core/numeric.py
deleted file mode 100755
index 1bd6f51..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/numeric.py
+++ /dev/null
@@ -1,2217 +0,0 @@
-#TODO: Convert this to use the cern.colt class items.
-__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
-           'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
-           'fromstring', 'fromfile', 'frombuffer','newbuffer',
-           'getbuffer', 'int_asbuffer', 'where', 'argwhere',
-           'concatenate', 'fastCopyAndTranspose', 'lexsort',
-           'set_numeric_ops', 'can_cast',
-           'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
-           'isfortran', 'empty_like', 'zeros_like',
-           'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
-           'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
-           'array2string', 'get_printoptions', 'set_printoptions',
-           'array_repr', 'array_str', 'set_string_function',
-           'little_endian', 'require',
-           'fromiter', 'array_equal', 'array_equiv',
-           'indices', 'fromfunction',
-           'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
-           'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
-           'seterr', 'geterr', 'setbufsize', 'getbufsize',
-           'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
-           'Inf', 'inf', 'infty', 'Infinity',
-           'nan', 'NaN', 'False_', 'True_', 'bitwise_not']#,
-#           'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS']
-
-import sys
-import warnings
-#import multiarray
-import Matrix as multiarray
-#import umath
-from umath import *
-import numerictypes
-from numerictypes import *
-
-bitwise_not = invert
-
-#CLIP = multiarray.CLIP
-#WRAP = multiarray.WRAP
-#RAISE = multiarray.RAISE
-#MAXDIMS = multiarray.MAXDIMS
-#ALLOW_THREADS = multiarray.ALLOW_THREADS
-#BUFSIZE = multiarray.BUFSIZE
-
-ndarray = multiarray.ndarray
-flatiter = multiarray.flatiter
-broadcast = multiarray.broadcast
-dtype = multiarray.dtype
-ufunc = type(sin)
-
-
-# originally from Fernando Perez's IPython
-def zeros_like(a):
-    """
-    Returns an array of zeros with the same shape and type as a given array.
-
-    Equivalent to ``a.copy().fill(0)``.
-
-    Parameters
-    ----------
-    a : array_like
-        The shape and data-type of `a` defines the parameters of
-        the returned array.
-
-    Returns
-    -------
-    out : ndarray
-        Array of zeros with same shape and type as `a`.
-
-    See Also
-    --------
-    numpy.ones_like : Return an array of ones with shape and type of input.
-    numpy.empty_like : Return an empty array with shape and type of input.
-    numpy.zeros : Return a new array setting values to zero.
-    numpy.ones : Return a new array setting values to one.
-    numpy.empty : Return a new uninitialized array.
-
-    Examples
-    --------
-    >>> x = np.arange(6)
-    >>> x = x.reshape((2, 3))
-    >>> x
-    array([[0, 1, 2],
-           [3, 4, 5]])
-    >>> np.zeros_like(x)
-    array([[0, 0, 0],
-           [0, 0, 0]])
-
-    """
-    if isinstance(a, ndarray):
-        res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
-        res.fill(0)
-        return res
-    try:
-        wrap = a.__array_wrap__
-    except AttributeError:
-        wrap = None
-    a = asarray(a)
-    res = zeros(a.shape, a.dtype)
-    if wrap:
-        res = wrap(res)
-    return res
-
-def empty_like(a):
-    """
-    Create a new array with the same shape and type as another.
-
-    Parameters
-    ----------
-    a : ndarray
-        Returned array will have same shape and type as `a`.
-
-    See Also
-    --------
-    zeros_like, ones_like, zeros, ones, empty
-
-    Notes
-    -----
-    This function does *not* initialize the returned array; to do that use
-    `zeros_like` or `ones_like` instead.
-
-    Examples
-    --------
-    >>> a = np.array([[1,2,3],[4,5,6]])
-    >>> np.empty_like(a)
-    >>> np.empty_like(a)
-    array([[-1073741821, -1067702173,       65538],    #random data
-           [      25670,    23454291,       71800]])
-
-    """
-    if isinstance(a, ndarray):
-        res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
-        return res
-    try:
-        wrap = a.__array_wrap__
-    except AttributeError:
-        wrap = None
-    a = asarray(a)
-    res = empty(a.shape, a.dtype)
-    if wrap:
-        res = wrap(res)
-    return res
-
-# end Fernando's utilities
-
-
-def extend_all(module):
-    adict = {}
-    for a in __all__:
-        adict[a] = 1
-    try:
-        mall = getattr(module, '__all__')
-    except AttributeError:
-        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
-    for a in mall:
-        if a not in adict:
-            __all__.append(a)
-
-extend_all(umath)
-extend_all(numerictypes)
-
-newaxis = None
-
-
-arange = multiarray.arange
-array = multiarray.array
-zeros = multiarray.zeros
-empty = multiarray.empty
-fromstring = multiarray.fromstring
-fromiter = multiarray.fromiter
-fromfile = multiarray.fromfile
-frombuffer = multiarray.frombuffer
-newbuffer = multiarray.newbuffer
-getbuffer = multiarray.getbuffer
-int_asbuffer = multiarray.int_asbuffer
-where = multiarray.where
-concatenate = multiarray.concatenate
-fastCopyAndTranspose = multiarray._fastCopyAndTranspose
-set_numeric_ops = multiarray.set_numeric_ops
-can_cast = multiarray.can_cast
-lexsort = multiarray.lexsort
-compare_chararrays = multiarray.compare_chararrays
-putmask = multiarray.putmask
-
-def asarray(a, dtype=None, order=None):
-    """
-    Convert the input to an array.
-
-    Parameters
-    ----------
-    a : array_like
-        Input data, in any form that can be converted to an array.  This
-        includes lists, lists of tuples, tuples, tuples of tuples, tuples
-        of lists and ndarrays.
-    dtype : data-type, optional
-        By default, the data-type is inferred from the input data.
-    order : {'C', 'F'}, optional
-        Whether to use row-major ('C') or column-major ('FORTRAN') memory
-        representation.  Defaults to 'C'.
-
-    Returns
-    -------
-    out : ndarray
-        Array interpretation of `a`.  No copy is performed if the input
-        is already an ndarray.  If `a` is a subclass of ndarray, a base
-        class ndarray is returned.
-
-    See Also
-    --------
-    asanyarray : Similar function which passes through subclasses.
-    ascontiguousarray : Convert input to a contiguous array.
-    asfarray : Convert input to a floating point ndarray.
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
-    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
-    fromiter : Create an array from an iterator.
-    fromfunction : Construct an array by executing a function on grid
-                   positions.
-
-    Examples
-    --------
-    Convert a list into an array:
-
-    >>> a = [1, 2]
-    >>> np.asarray(a)
-    array([1, 2])
-
-    Existing arrays are not copied:
-
-    >>> a = np.array([1, 2])
-    >>> np.asarray(a) is a
-    True
-
-    """
-    return array(a, dtype, copy=False, order=order)
-
-def asanyarray(a, dtype=None, order=None):
-    """
-    Convert the input to a ndarray, but pass ndarray subclasses through.
-
-    Parameters
-    ----------
-    a : array_like
-        Input data, in any form that can be converted to an array.  This
-        includes scalars, lists, lists of tuples, tuples, tuples of tuples,
-        tuples of lists and ndarrays.
-    dtype : data-type, optional
-        By default, the data-type is inferred from the input data.
-    order : {'C', 'F'}, optional
-        Whether to use row-major ('C') or column-major ('F') memory
-        representation.  Defaults to 'C'.
-
-    Returns
-    -------
-    out : ndarray or an ndarray subclass
-        Array interpretation of `a`.  If `a` is an ndarray or a subclass
-        of ndarray, it is returned as-is and no copy is performed.
-
-    See Also
-    --------
-    asarray : Similar function which always returns ndarrays.
-    ascontiguousarray : Convert input to a contiguous array.
-    asfarray : Convert input to a floating point ndarray.
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
-    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
-    fromiter : Create an array from an iterator.
-    fromfunction : Construct an array by executing a function on grid
-                   positions.
-
-    Examples
-    --------
-    Convert a list into an array:
-
-    >>> a = [1, 2]
-    >>> np.asanyarray(a)
-    array([1, 2])
-
-    Instances of `ndarray` subclasses are passed through as-is:
-
-    >>> a = np.matrix([1, 2])
-    >>> np.asanyarray(a) is a
-    True
-
-    """
-    return array(a, dtype, copy=False, order=order, subok=True)
-
-def ascontiguousarray(a, dtype=None):
-    """
-    Return a contiguous array in memory (C order).
-
-    Parameters
-    ----------
-    a : array_like
-        Input array.
-    dtype : str or dtype object, optional
-        Data-type of returned array.
-
-    Returns
-    -------
-    out : ndarray
-        Contiguous array of same shape and content as `a`, with type `dtype`
-        if specified.
-
-    See Also
-    --------
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
-    require : Return an ndarray that satisfies requirements.
-    ndarray.flags : Information about the memory layout of the array.
-
-    Examples
-    --------
-    >>> x = np.arange(6).reshape(2,3)
-    >>> np.ascontiguousarray(x, dtype=np.float32)
-    array([[ 0.,  1.,  2.],
-           [ 3.,  4.,  5.]], dtype=float32)
-    >>> x.flags['C_CONTIGUOUS']
-    True
-
-    """
-    return array(a, dtype, copy=False, order='C', ndmin=1)
-
-def asfortranarray(a, dtype=None):
-    """
-    Return an array laid out in Fortran order in memory.
-
-    Parameters
-    ----------
-    a : array_like
-        Input array.
-    dtype : str or dtype object, optional
-        By default, the data-type is inferred from the input data.
-
-    Returns
-    -------
-    out : ndarray
-        The input `a` in Fortran, or column-major, order.
-
-    See Also
-    --------
-    ascontiguousarray : Convert input to a contiguous (C order) array.
-    asanyarray : Convert input to an ndarray with either row or
-        column-major memory order.
-    require : Return an ndarray that satisfies requirements.
-    ndarray.flags : Information about the memory layout of the array.
-
-    Examples
-    --------
-    >>> x = np.arange(6).reshape(2,3)
-    >>> y = np.asfortranarray(x)
-    >>> x.flags['F_CONTIGUOUS']
-    False
-    >>> y.flags['F_CONTIGUOUS']
-    True
-
-    """
-    return array(a, dtype, copy=False, order='F', ndmin=1)
-
-def require(a, dtype=None, requirements=None):
-    """
-    Return an ndarray of the provided type that satisfies requirements.
-
-    This function is useful to be sure that an array with the correct flags
-    is returned for passing to compiled code (perhaps through ctypes).
-
-    Parameters
-    ----------
-    a : array_like
-       The object to be converted to a type-and-requirement-satisfying array.
-    dtype : data-type
-       The required data-type, the default data-type is float64).
-    requirements : str or list of str
-       The requirements list can be any of the following
-
-       * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
-       * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
-       * 'ALIGNED' ('A')      - ensure a data-type aligned array
-       * 'WRITEABLE' ('W')    - ensure a writable array
-       * 'OWNDATA' ('O')      - ensure an array that owns its own data
-
-    See Also
-    --------
-    asarray : Convert input to an ndarray.
-    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
-    ascontiguousarray : Convert input to a contiguous array.
-    asfortranarray : Convert input to an ndarray with column-major
-                     memory order.
-    ndarray.flags : Information about the memory layout of the array.
-
-    Notes
-    -----
-    The returned array will be guaranteed to have the listed requirements
-    by making a copy if needed.
-
-    Examples
-    --------
-    >>> x = np.arange(6).reshape(2,3)
-    >>> x.flags
-      C_CONTIGUOUS : True
-      F_CONTIGUOUS : False
-      OWNDATA : False
-      WRITEABLE : True
-      ALIGNED : True
-      UPDATEIFCOPY : False
-
-    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
-    >>> y.flags
-      C_CONTIGUOUS : False
-      F_CONTIGUOUS : True
-      OWNDATA : True
-      WRITEABLE : True
-      ALIGNED : True
-      UPDATEIFCOPY : False
-
-    """
-    if requirements is None:
-        requirements = []
-    else:
-        requirements = [x.upper() for x in requirements]
-
-    if not requirements:
-        return asanyarray(a, dtype=dtype)
-
-    if 'ENSUREARRAY' in requirements or 'E' in requirements:
-        subok = False
-    else:
-        subok = True
-
-    arr = array(a, dtype=dtype, copy=False, subok=subok)
-
-    copychar = 'A'
-    if 'FORTRAN' in requirements or \
-       'F_CONTIGUOUS' in requirements or \
-       'F' in requirements:
-        copychar = 'F'
-    elif 'CONTIGUOUS' in requirements or \
-         'C_CONTIGUOUS' in requirements or \
-         'C' in requirements:
-        copychar = 'C'
-
-    for prop in requirements:
-        if not arr.flags[prop]:
-            arr = arr.copy(copychar)
-            break
-    return arr
-
-def isfortran(a):
-    """
-    Returns True if array is arranged in Fortran-order in memory
-    and dimension > 1.
-
-    Parameters
-    ----------
-    a : ndarray
-        Input array.
-
-
-    Examples
-    --------
-
-    np.array allows to specify whether the array is written in C-contiguous
-    order (last index varies the fastest), or FORTRAN-contiguous order in
-    memory (first index varies the fastest).
-
-    >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
-    >>> a
-    array([[1, 2, 3],
-           [4, 5, 6]])
-    >>> np.isfortran(a)
-    False
-
-    >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
-    >>> b
-    array([[1, 2, 3],
-           [4, 5, 6]])
-    >>> np.isfortran(b)
-    True
-
-
-    The transpose of a C-ordered array is a FORTRAN-ordered array.
-
-    >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
-    >>> a
-    array([[1, 2, 3],
-           [4, 5, 6]])
-    >>> np.isfortran(a)
-    False
-    >>> b = a.T
-    >>> b
-    array([[1, 4],
-           [2, 5],
-           [3, 6]])
-    >>> np.isfortran(b)
-    True
-
-    1-D arrays always evaluate as False.
-
-    >>> np.isfortran(np.array([1, 2], order='FORTRAN'))
-    False
-
-    """
-    return a.flags.fnc
-
-def argwhere(a):
-    """
-    Find the indices of array elements that are non-zero, grouped by element.
-
-    Parameters
-    ----------
-    a : array_like
-        Input data.
-
-    Returns
-    -------
-    index_array : ndarray
-        Indices of elements that are non-zero. Indices are grouped by element.
-
-    See Also
-    --------
-    where, nonzero
-
-    Notes
-    -----
-    ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
-
-    The output of ``argwhere`` is not suitable for indexing arrays.
-    For this purpose use ``where(a)`` instead.
-
-    Examples
-    --------
-    >>> x = np.arange(6).reshape(2,3)
-    >>> x
-    array([[0, 1, 2],
-           [3, 4, 5]])
-    >>> np.argwhere(x>1)
-    array([[0, 2],
-           [1, 0],
-           [1, 1],
-           [1, 2]])
-
-    """
-    return transpose(asanyarray(a).nonzero())
-
-def flatnonzero(a):
-    """
-    Return indices that are non-zero in the flattened version of a.
-
-    This is equivalent to a.ravel().nonzero()[0].
-
-    Parameters
-    ----------
-    a : ndarray
-        Input array.
-
-    Returns
-    -------
-    res : ndarray
-        Output array, containing the indices of the elements of `a.ravel()`
-        that are non-zero.
-
-    See Also
-    --------
-    nonzero : Return the indices of the non-zero elements of the input array.
-    ravel : Return a 1-D array containing the elements of the input array.
-
-    Examples
-    --------
-    >>> x = np.arange(-2, 3)
-    >>> x
-    array([-2, -1,  0,  1,  2])
-    >>> np.flatnonzero(x)
-    array([0, 1, 3, 4])
-
-    Use the indices of the non-zero elements as an index array to extract
-    these elements:
-
-    >>> x.ravel()[np.flatnonzero(x)]
-    array([-2, -1,  1,  2])
-
-    """
-    return a.ravel().nonzero()[0]
-
-_mode_from_name_dict = {'v': 0,
-                        's' : 1,
-                        'f' : 2}
-
-def _mode_from_name(mode):
-    if isinstance(mode, type("")):
-        return _mode_from_name_dict[mode.lower()[0]]
-    return mode
-
-def correlate(a,v,mode='valid',old_behavior=True):
-    """
-    Discrete, linear correlation of two 1-dimensional sequences.
-
-    This function is equivalent to
-
-    >>> np.convolve(a, v[::-1], mode=mode)
-
-    where ``v[::-1]`` is the reverse of `v`.
-
-    Parameters
-    ----------
-    a, v : array_like
-        Input sequences.
-    mode : {'valid', 'same', 'full'}, optional
-        Refer to the `convolve` docstring.  Note that the default
-        is `valid`, unlike `convolve`, which uses `full`.
-    old_behavior : bool
-        If True, uses the old, numeric behavior (correlate(a,v) == correlate(v,
-        a), and the conjugate is not taken for complex arrays). If False, uses
-        the conventional signal processing definition (see note).
-
-    See Also
-    --------
-    convolve : Discrete, linear convolution of two
-               one-dimensional sequences.
-
-    Note
-    ----
-    If old_behavior is False, this function computes the correlation as
-    generally defined in signal processing texts::
-
-        z[k] = sum_n a[n] * conj(v[n+k])
-
-    with a and v sequences being zero-padded where necessary and conj being the
-    conjugate.
-
-    Examples
-    --------
-    >>> np.correlate([1, 2, 3], [0, 1, 0.5])
-    array([ 3.5])
-    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
-    array([ 2. ,  3.5,  3. ])
-    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
-    array([ 0.5,  2. ,  3.5,  3. ,  0. ])
-
-    """
-    mode = _mode_from_name(mode)
-    if old_behavior:
-        warnings.warn("""
-The current behavior of correlate is deprecated for 1.4.0, and will be removed
-for NumPy 1.5.0.
-    
-The new behavior fits the conventional definition of correlation: inputs are
-never swapped, and the second argument is conjugated for complex arrays.""",
-            DeprecationWarning)
-        return multiarray.correlate(a,v,mode)
-    else:
-        return multiarray.correlate2(a,v,mode)
-
-def convolve(a,v,mode='full'):
-    """
-    Returns the discrete, linear convolution of two one-dimensional sequences.
-
-    The convolution operator is often seen in signal processing, where it
-    models the effect of a linear time-invariant system on a signal [1]_.  In
-    probability theory, the sum of two independent random variables is
-    distributed according to the convolution of their individual
-    distributions.
-
-    Parameters
-    ----------
-    a : (N,) array_like
-        First one-dimensional input array.
-    v : (M,) array_like
-        Second one-dimensional input array.
-    mode : {'full', 'valid', 'same'}, optional
-        'full':
-          By default, mode is 'full'.  This returns the convolution
-          at each point of overlap, with an output shape of (N+M-1,). At
-          the end-points of the convolution, the signals do not overlap
-          completely, and boundary effects may be seen.
-
-        'same':
-          Mode `same` returns output of length ``max(M, N)``.  Boundary
-          effects are still visible.
-
-        'valid':
-          Mode `valid` returns output of length
-          ``max(M, N) - min(M, N) + 1``.  The convolution product is only given
-          for points where the signals overlap completely.  Values outside
-          the signal boundary have no effect.
-
-    Returns
-    -------
-    out : ndarray
-        Discrete, linear convolution of `a` and `v`.
-
-    See Also
-    --------
-    scipy.signal.fftconv : Convolve two arrays using the Fast Fourier
-                           Transform.
-    scipy.linalg.toeplitz : Used to construct the convolution operator.
-
-    Notes
-    -----
-    The discrete convolution operation is defined as
-
-    .. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] f[n - m]
-
-    It can be shown that a convolution :math:`x(t) * y(t)` in time/space
-    is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
-    domain, after appropriate padding (padding is necessary to prevent
-    circular convolution).  Since multiplication is more efficient (faster)
-    than convolution, the function `scipy.signal.fftconvolve` exploits the
-    FFT to calculate the convolution of large data-sets.
-
-    References
-    ----------
-    .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
-
-    Examples
-    --------
-    Note how the convolution operator flips the second array
-    before "sliding" the two across one another:
-
-    >>> np.convolve([1, 2, 3], [0, 1, 0.5])
-    array([ 0. ,  1. ,  2.5,  4. ,  1.5])
-
-    Only return the middle values of the convolution.
-    Contains boundary effects, where zeros are taken
-    into account:
-
-    >>> np.convolve([1,2,3],[0,1,0.5], 'same')
-    array([ 1. ,  2.5,  4. ])
-
-    The two arrays are of the same length, so there
-    is only one position where they completely overlap:
-
-    >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
-    array([ 2.5])
-
-    """
-    a,v = array(a, ndmin=1),array(v, ndmin=1)
-    if (len(v) > len(a)):
-        a, v = v, a
-    if len(a) == 0 :
-        raise ValueError('a cannot be empty')
-    if len(v) == 0 :
-        raise ValueError('v cannot be empty')
-    mode = _mode_from_name(mode)
-    return multiarray.correlate(a, v[::-1], mode)
-
-def outer(a,b):
-    """
-    Returns the outer product of two vectors.
-
-    Given two vectors, ``[a0, a1, ..., aM]`` and ``[b0, b1, ..., bN]``,
-    the outer product becomes::
-
-      [[a0*b0  a0*b1 ... a0*bN ]
-       [a1*b0    .
-       [ ...          .
-       [aM*b0            aM*bN ]]
-
-    Parameters
-    ----------
-    a : array_like, shaped (M,)
-        First input vector.  If either of the input vectors are not
-        1-dimensional, they are flattened.
-    b : array_like, shaped (N,)
-        Second input vector.
-
-    Returns
-    -------
-    out : ndarray, shaped (M, N)
-        ``out[i, j] = a[i] * b[j]``
-
-    Notes
-    -----
-    The outer product of vectors is a special case of the Kronecker product.
-
-    Examples
-    --------
-    >>> x = np.array(['a', 'b', 'c'], dtype=object)
-
-    >>> np.outer(x, [1, 2, 3])
-    array([[a, aa, aaa],
-           [b, bb, bbb],
-           [c, cc, ccc]], dtype=object)
-
-    """
-    a = asarray(a)
-    b = asarray(b)
-    return a.ravel()[:,newaxis]*b.ravel()[newaxis,:]
-
-# try to import blas optimized dot if available
-try:
-    # importing this changes the dot function for basic 4 types
-    # to blas-optimized versions.
-    from _dotblas import dot, vdot, inner, alterdot, restoredot
-except ImportError:
-    # docstrings are in add_newdocs.py
-    inner = multiarray.inner
-    dot = multiarray.dot
-    def vdot(a, b):
-        return dot(asarray(a).ravel().conj(), asarray(b).ravel())
-    def alterdot():
-        pass
-    def restoredot():
-        pass
-
-def tensordot(a, b, axes=2):
-    """
-    Returns the tensor dot product for (ndim >= 1) arrays along an axes.
-
-    The first element of the sequence determines the axis or axes
-    in `a` to sum over, and the second element in `axes` argument sequence
-    determines the axis or axes in `b` to sum over.
-
-    Parameters
-    ----------
-    a : array_like
-        Input array.
-    b : array_like
-        Input array.
-    axes : shape tuple
-        Axes to be summed over.
-
-    See Also
-    --------
-    dot
-
-    Notes
-    -----
-    r_{xxx, yyy} = \\sum_k a_{xxx,k} b_{k,yyy}
-
-    When there is more than one axis to sum over, the corresponding
-    arguments to axes should be sequences of the same length with the first
-    axis to sum over given first in both sequences, the second axis second,
-    and so forth.
-
-    If the `axes` argument is an integer, N, then the last N dimensions of `a`
-    and first N dimensions of `b` are summed over.
-
-    Examples
-    --------
-    >>> a = np.arange(60.).reshape(3,4,5)
-    >>> b = np.arange(24.).reshape(4,3,2)
-    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
-    >>> c.shape
-    (5, 2)
-    >>> c
-    array([[ 4400.,  4730.],
-           [ 4532.,  4874.],
-           [ 4664.,  5018.],
-           [ 4796.,  5162.],
-           [ 4928.,  5306.]])
-
-    >>> # A slower but equivalent way of computing the same...
-    >>> c = np.zeros((5,2))
-    >>> for i in range(5):
-    ...   for j in range(2):
-    ...     for k in range(3):
-    ...       for n in range(4):
-    ...         c[i,j] += a[k,n,i] * b[n,k,j]
-
-    """
-    try:
-        iter(axes)
-    except:
-        axes_a = range(-axes,0)
-        axes_b = range(0,axes)
-    else:
-        axes_a, axes_b = axes
-    try:
-        na = len(axes_a)
-        axes_a = list(axes_a)
-    except TypeError:
-        axes_a = [axes_a]
-        na = 1
-    try:
-        nb = len(axes_b)
-        axes_b = list(axes_b)
-    except TypeError:
-        axes_b = [axes_b]
-        nb = 1
-
-    a, b = asarray(a), asarray(b)
-    as_ = a.shape
-    nda = len(a.shape)
-    bs = b.shape
-    ndb = len(b.shape)
-    equal = True
-    if (na != nb): equal = False
-    else:
-        for k in xrange(na):
-            if as_[axes_a[k]] != bs[axes_b[k]]:
-                equal = False
-                break
-            if axes_a[k] < 0:
-                axes_a[k] += nda
-            if axes_b[k] < 0:
-                axes_b[k] += ndb
-    if not equal:
-        raise ValueError, "shape-mismatch for sum"
-
-    # Move the axes to sum over to the end of "a"
-    # and to the front of "b"
-    notin = [k for k in range(nda) if k not in axes_a]
-    newaxes_a = notin + axes_a
-    N2 = 1
-    for axis in axes_a:
-        N2 *= as_[axis]
-    newshape_a = (-1, N2)
-    olda = [as_[axis] for axis in notin]
-
-    notin = [k for k in range(ndb) if k not in axes_b]
-    newaxes_b = axes_b + notin
-    N2 = 1
-    for axis in axes_b:
-        N2 *= bs[axis]
-    newshape_b = (N2, -1)
-    oldb = [bs[axis] for axis in notin]
-
-    at = a.transpose(newaxes_a).reshape(newshape_a)
-    bt = b.transpose(newaxes_b).reshape(newshape_b)
-    res = dot(at, bt)
-    return res.reshape(olda + oldb)
-
-def roll(a, shift, axis=None):
-    """
-    Roll array elements along a given axis.
-
-    Elements that roll beyond the last position are re-introduced at
-    the first.
-
-    Parameters
-    ----------
-    a : array_like
-        Input array.
-    shift : int
-        The number of places by which elements are shifted.
-    axis : int, optional
-        The axis along which elements are shifted.  By default, the array
-        is flattened before shifting, after which the original
-        shape is restored.
-
-    Returns
-    -------
-    res : ndarray
-        Output array, with the same shape as `a`.
-
-    See Also
-    --------
-    rollaxis : Roll the specified axis backwards, until it lies in a
-               given position.
-
-    Examples
-    --------
-    >>> x = np.arange(10)
-    >>> np.roll(x, 2)
-    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
-
-    >>> x2 = np.reshape(x, (2,5))
-    >>> x2
-    array([[0, 1, 2, 3, 4],
-           [5, 6, 7, 8, 9]])
-    >>> np.roll(x2, 1)
-    array([[9, 0, 1, 2, 3],
-           [4, 5, 6, 7, 8]])
-    >>> np.roll(x2, 1, axis=0)
-    array([[5, 6, 7, 8, 9],
-           [0, 1, 2, 3, 4]])
-    >>> np.roll(x2, 1, axis=1)
-    array([[4, 0, 1, 2, 3],
-           [9, 5, 6, 7, 8]])
-
-    """
-    a = asanyarray(a)
-    if axis is None:
-        n = a.size
-        reshape = True
-    else:
-        n = a.shape[axis]
-        reshape = False
-    shift %= n
-    indexes = concatenate((arange(n-shift,n),arange(n-shift)))
-    res = a.take(indexes, axis)
-    if reshape:
-        return res.reshape(a.shape)
-    else:
-        return res
-
-def rollaxis(a, axis, start=0):
-    """
-    Roll the specified axis backwards, until it lies in a given position.
-
-    Parameters
-    ----------
-    a : ndarray
-        Input array.
-    axis : int
-        The axis to roll backwards.  The positions of the other axes do not
-        change relative to one another.
-    start : int, optional
-        The axis is rolled until it lies before this position.
-
-    Returns
-    -------
-    res : ndarray
-        Output array.
-
-    See Also
-    --------
-    roll : Roll the elements of an array by a number of positions along a
-           given axis.
-
-    Examples
-    --------
-    >>> a = np.ones((3,4,5,6))
-    >>> np.rollaxis(a, 3, 1).shape
-    (3, 6, 4, 5)
-    >>> np.rollaxis(a, 2).shape
-    (5, 3, 4, 6)
-    >>> np.rollaxis(a, 1, 4).shape
-    (3, 5, 6, 4)
-
-    """
-    n = a.ndim
-    if axis < 0:
-        axis += n
-    if start < 0:
-        start += n
-    msg = 'rollaxis: %s (%d) must be >=0 and < %d'
-    if not (0 <= axis < n):
-        raise ValueError, msg % ('axis', axis, n)
-    if not (0 <= start < n+1):
-        raise ValueError, msg % ('start', start, n+1)
-    if (axis < start): # it's been removed
-        start -= 1
-    if axis==start:
-        return a
-    axes = range(0,n)
-    axes.remove(axis)
-    axes.insert(start, axis)
-    return a.transpose(axes)
-
-# fix hack in scipy which imports this function
-def _move_axis_to_0(a, axis):
-    return rollaxis(a, axis, 0)
-
-def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
-    """
-    Return the cross product of two (arrays of) vectors.
-
-    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
-    to both `a` and `b`.  If `a` and `b` are arrays of vectors, the vectors
-    are defined by the last axis of `a` and `b` by default, and these axes
-    can have dimensions 2 or 3.  Where the dimension of either `a` or `b` is
-    2, the third component of the input vector is assumed to be zero and the
-    cross product calculated accordingly.  In cases where both input vectors
-    have dimension 2, the z-component of the cross product is returned.
-
-    Parameters
-    ----------
-    a : array_like
-        Components of the first vector(s).
-    b : array_like
-        Components of the second vector(s).
-    axisa : int, optional
-        Axis of `a` that defines the vector(s).  By default, the last axis.
-    axisb : int, optional
-        Axis of `b` that defines the vector(s).  By default, the last axis.
-    axisc : int, optional
-        Axis of `c` containing the cross product vector(s).  By default, the
-        last axis.
-    axis : int, optional
-        If defined, the axis of `a`, `b` and `c` that defines the vector(s)
-        and cross product(s).  Overrides `axisa`, `axisb` and `axisc`.
-
-    Returns
-    -------
-    c : ndarray
-        Vector cross product(s).
-
-    Raises
-    ------
-    ValueError
-        When the dimension of the vector(s) in `a` and/or `b` does not
-        equal 2 or 3.
-
-    See Also
-    --------
-    inner : Inner product
-    outer : Outer product.
-    ix_ : Construct index arrays.
-
-    Examples
-    --------
-    Vector cross-product.
-
-    >>> x = [1, 2, 3]
-    >>> y = [4, 5, 6]
-    >>> np.cross(x, y)
-    array([-3,  6, -3])
-
-    One vector with dimension 2.
-
-    >>> x = [1, 2]
-    >>> y = [4, 5, 6]
-    >>> np.cross(x, y)
-    array([12, -6, -3])
-
-    Equivalently:
-
-    >>> x = [1, 2, 0]
-    >>> y = [4, 5, 6]
-    >>> np.cross(x, y)
-    array([12, -6, -3])
-
-    Both vectors with dimension 2.
-
-    >>> x = [1,2]
-    >>> y = [4,5]
-    >>> np.cross(x, y)
-    -3
-
-    Multiple vector cross-products. Note that the direction of the cross
-    product vector is defined by the `right-hand rule`.
-
-    >>> x = np.array([[1,2,3], [4,5,6]])
-    >>> y = np.array([[4,5,6], [1,2,3]])
-    >>> np.cross(x, y)
-    array([[-3,  6, -3],
-           [ 3, -6,  3]])
-
-    The orientation of `c` can be changed using the `axisc` keyword.
-
-    >>> np.cross(x, y, axisc=0)
-    array([[-3,  3],
-           [ 6, -6],
-           [-3,  3]])
-
-    Change the vector definition of `x` and `y` using `axisa` and `axisb`.
-
-    >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
-    >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
-    >>> np.cross(x, y)
-    array([[ -6,  12,  -6],
-           [  0,   0,   0],
-           [  6, -12,   6]])
-    >>> np.cross(x, y, axisa=0, axisb=0)
-    array([[-24,  48, -24],
-           [-30,  60, -30],
-           [-36,  72, -36]])
-
-    """
-    if axis is not None:
-        axisa,axisb,axisc=(axis,)*3
-    a = asarray(a).swapaxes(axisa, 0)
-    b = asarray(b).swapaxes(axisb, 0)
-    msg = "incompatible dimensions for cross product\n"\
-          "(dimension must be 2 or 3)"
-    if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
-        raise ValueError(msg)
-    if a.shape[0] == 2:
-        if (b.shape[0] == 2):
-            cp = a[0]*b[1] - a[1]*b[0]
-            if cp.ndim == 0:
-                return cp
-            else:
-                return cp.swapaxes(0, axisc)
-        else:
-            x = a[1]*b[2]
-            y = -a[0]*b[2]
-            z = a[0]*b[1] - a[1]*b[0]
-    elif a.shape[0] == 3:
-        if (b.shape[0] == 3):
-            x = a[1]*b[2] - a[2]*b[1]
-            y = a[2]*b[0] - a[0]*b[2]
-            z = a[0]*b[1] - a[1]*b[0]
-        else:
-            x = -a[2]*b[1]
-            y = a[2]*b[0]
-            z = a[0]*b[1] - a[1]*b[0]
-    cp = array([x,y,z])
-    if cp.ndim == 1:
-        return cp
-    else:
-        return cp.swapaxes(0,axisc)
-
-
-#Use numarray's printing function
-from arrayprint import array2string, get_printoptions, set_printoptions
-
-_typelessdata = [int_, float_, complex_]
-if issubclass(intc, int):
-    _typelessdata.append(intc)
-
-if issubclass(longlong, int):
-    _typelessdata.append(longlong)
-
-def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
-    """
-    Return the string representation of an array.
-
-    Parameters
-    ----------
-    arr : ndarray
-        Input array.
-    max_line_width : int, optional
-        The maximum number of columns the string should span. Newline
-        characters split the string appropriately after array elements.
-    precision : int, optional
-        Floating point precision. Default is the current printing precision
-        (usually 8), which can be altered using `set_printoptions`.
-    suppress_small : bool, optional
-        Represent very small numbers as zero, default is False. Very small
-        is defined by `precision`, if the precision is 8 then
-        numbers smaller than 5e-9 are represented as zero.
-
-    Returns
-    -------
-    string : str
-      The string representation of an array.
-
-    See Also
-    --------
-    array_str, array2string, set_printoptions
-
-    Examples
-    --------
-    >>> np.array_repr(np.array([1,2]))
-    'array([1, 2])'
-    >>> np.array_repr(np.ma.array([0.]))
-    'MaskedArray([ 0.])'
-    >>> np.array_repr(np.array([], np.int32))
-    'array([], dtype=int32)'
-
-    >>> x = np.array([1e-6, 4e-7, 2, 3])
-    >>> np.array_repr(x, precision=6, suppress_small=True)
-    'array([ 0.000001,  0.      ,  2.      ,  3.      ])'
-
-    """
-    if arr.size > 0 or arr.shape==(0,):
-        lst = array2string(arr, max_line_width, precision, suppress_small,
-                           ', ', "array(")
-    else: # show zero-length shape unless it is (0,)
-        lst = "[], shape=%s" % (repr(arr.shape),)
-    typeless = arr.dtype.type in _typelessdata
-
-    if arr.__class__ is not ndarray:
-        cName= arr.__class__.__name__
-    else:
-        cName = "array"
-    if typeless and arr.size:
-        return cName + "(%s)" % lst
-    else:
-        typename=arr.dtype.name
-        lf = ''
-        if issubclass(arr.dtype.type, flexible):
-            if arr.dtype.names:
-                typename = "%s" % str(arr.dtype)
-            else:
-                typename = "'%s'" % str(arr.dtype)
-            lf = '\n'+' '*len("array(")
-        return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
-
-def array_str(a, max_line_width=None, precision=None, suppress_small=None):
-    """
-    Return a string representation of the data in an array.
-
-    The data in the array is returned as a single string. This function
-    is similar to `array_repr`, the difference is that `array_repr` also
-    returns information on the type of array and data type.
-
-    Parameters
-    ----------
-    a : ndarray
-        Input array.
-    max_line_width : int, optional
-        Inserts newlines if text is longer than `max_line_width`.
-    precision : int, optional
-        Floating point precision. Default is the current printing precision
-        (usually 8), which can be altered using set_printoptions.
-    suppress_small : bool, optional
-        Represent very small numbers as zero, default is False. Very small is
-        defined by precision, if the precision is 8 then numbers smaller than
-        5e-9 are represented as zero.
-
-    See Also
-    --------
-    array2string, array_repr, set_printoptions
-
-    Examples
-    --------
-    >>> np.array_str(np.arange(3))
-    >>> '[0 1 2]'
-
-    """
-    return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
-
-set_string_function = multiarray.set_string_function
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
-
-little_endian = (sys.byteorder == 'little')
-
-
-def indices(dimensions, dtype=int):
-    """
-    Return an array representing the indices of a grid.
-
-    Compute an array where the subarrays contain index values 0,1,...
-    varying only along the corresponding axis.
-
-    Parameters
-    ----------
-    dimensions : sequence of ints
-        The shape of the grid.
-    dtype : dtype, optional
-        Data type of the result.
-
-    Returns
-    -------
-    grid : ndarray
-        The array of grid indices,
-        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
-
-    See Also
-    --------
-    mgrid, meshgrid
-
-    Notes
-    -----
-    The output shape is obtained by prepending the number of dimensions
-    in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
-    ``(r0, ..., rN-1)`` of length ``N``, the output shape is
-    ``(N,r0,...,rN-1)``.
-
-    The subarrays ``grid[k]`` contains the N-D array of indices along the
-    ``k-th`` axis. Explicitly::
-
-        grid[k,i0,i1,...,iN-1] = ik
-
-    Examples
-    --------
-    >>> grid = np.indices((2, 3))
-    >>> grid.shape
-    (2,2,3)
-    >>> grid[0]        # row indices
-    array([[0, 0, 0],
-           [1, 1, 1]])
-    >>> grid[1]        # column indices
-    array([[0, 1, 2],
-           [0, 1, 2]])
-
-    The indices can be used as an index into an array.
-
-    >>> x = np.arange(20).reshape(5, 4)
-    >>> row, col = np.indices((2, 3))
-    >>> x[row, col]
-    array([[0, 1, 2],
-           [4, 5, 6]])
-
-    Note that it would be more straightforward in the above example to
-    extract the required elements directly with ``x[:2, :3]``.
-
-    """
-    dimensions = tuple(dimensions)
-    N = len(dimensions)
-    if N == 0:
-        return array([],dtype=dtype)
-    res = empty((N,)+dimensions, dtype=dtype)
-    for i, dim in enumerate(dimensions):
-        tmp = arange(dim,dtype=dtype)
-        tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
-        newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
-        val = zeros(newdim, dtype)
-        add(tmp, val, res[i])
-    return res
-
-def fromfunction(function, shape, **kwargs):
-    """
-    Construct an array by executing a function over each coordinate.
-
-    The resulting array therefore has a value ``fn(x, y, z)`` at
-    coordinate ``(x, y, z)``.
-
-    Parameters
-    ----------
-    fn : callable
-        The function is called with N parameters, each of which
-        represents the coordinates of the array varying along a
-        specific axis.  For example, if `shape` were ``(2, 2)``, then
-        the parameters would be two arrays, ``[[0, 0], [1, 1]]`` and
-        ``[[0, 1], [0, 1]]``.  `fn` must be capable of operating on
-        arrays, and should return a scalar value.
-    shape : (N,) tuple of ints
-        Shape of the output array, which also determines the shape of
-        the coordinate arrays passed to `fn`.
-    dtype : data-type, optional
-        Data-type of the coordinate arrays passed to `fn`.  By default,
-        `dtype` is float.
-
-    See Also
-    --------
-    indices, meshgrid
-
-    Notes
-    -----
-    Keywords other than `shape` and `dtype` are passed to the function.
-
-    Examples
-    --------
-    >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
-    array([[ True, False, False],
-           [False,  True, False],
-           [False, False,  True]], dtype=bool)
-
-    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
-    array([[0, 1, 2],
-           [1, 2, 3],
-           [2, 3, 4]])
-
-    """
-    dtype = kwargs.pop('dtype', float)
-    args = indices(shape, dtype=dtype)
-    return function(*args,**kwargs)
-
-def isscalar(num):
-    """
-    Returns True if the type of `num` is a scalar type.
-
-    Parameters
-    ----------
-    num : any
-        Input argument, can be of any type and shape.
-
-    Returns
-    -------
-    val : bool
-        True if `num` is a scalar type, False if it is not.
-
-    Examples
-    --------
-    >>> np.isscalar(3.1)
-    True
-    >>> np.isscalar([3.1])
-    False
-    >>> np.isscalar(False)
-    True
-
-    """
-    if isinstance(num, generic):
-        return True
-    else:
-        return type(num) in ScalarType
-
-_lkup = {
-    '0':'0000',
-    '1':'0001',
-    '2':'0010',
-    '3':'0011',
-    '4':'0100',
-    '5':'0101',
-    '6':'0110',
-    '7':'0111',
-    '8':'1000',
-    '9':'1001',
-    'a':'1010',
-    'b':'1011',
-    'c':'1100',
-    'd':'1101',
-    'e':'1110',
-    'f':'1111',
-    'A':'1010',
-    'B':'1011',
-    'C':'1100',
-    'D':'1101',
-    'E':'1110',
-    'F':'1111',
-    'L':''}
-
-def binary_repr(num, width=None):
-    """
-    Return the binary representation of the input number as a string.
-
-    For negative numbers, if width is not given, a minus sign is added to the
-    front. If width is given, the two's complement of the number is
-    returned, with respect to that width.
-
-    In a two's-complement system negative numbers are represented by the two's
-    complement of the absolute value. This is the most common method of
-    representing signed integers on computers [1]_. A N-bit two's-complement
-    system can represent every integer in the range
-    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
-
-    Parameters
-    ----------
-    num : int
-        Only an integer decimal number can be used.
-    width : int, optional
-        The length of the returned string if `num` is positive, the length of
-        the two's complement if `num` is negative.
-
-    Returns
-    -------
-    bin : str
-        Binary representation of `num` or two's complement of `num`.
-
-    See Also
-    --------
-    base_repr: Return a string representation of a number in the given base
-               system.
-
-    Notes
-    -----
-    `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
-    faster.
-
-    References
-    ----------
-    .. [1] Wikipedia, "Two's complement",
-        http://en.wikipedia.org/wiki/Two's_complement
-
-    Examples
-    --------
-    >>> np.binary_repr(3)
-    '11'
-    >>> np.binary_repr(-3)
-    '-11'
-    >>> np.binary_repr(3, width=4)
-    '0011'
-
-    The two's complement is returned when the input number is negative and
-    width is specified:
-
-    >>> np.binary_repr(-3, width=4)
-    '1101'
-
-    """
-    sign = ''
-    if num < 0:
-        if width is None:
-            sign = '-'
-            num = -num
-        else:
-            # replace num with its 2-complement
-            num = 2**width + num
-    elif num == 0:
-        return '0'*(width or 1)
-    ostr = hex(num)
-    bin = ''.join([_lkup[ch] for ch in ostr[2:]])
-    bin = bin.lstrip('0')
-    if width is not None:
-        bin = bin.zfill(width)
-    return sign + bin
-
-def base_repr (number, base=2, padding=0):
-    """
-    Return a string representation of a number in the given base system.
-
-    Parameters
-    ----------
-    number : scalar
-        The value to convert. Only positive values are handled.
-    base : int
-        Convert `number` to the `base` number system. The valid range is 2-36,
-        the default value is 2.
-    padding : int, optional
-        Number of zeros padded on the left.
-
-    Returns
-    -------
-    out : str
-        String representation of `number` in `base` system.
-
-    See Also
-    --------
-    binary_repr : Faster version of `base_repr` for base 2 that also handles
-        negative numbers.
-
-    Examples
-    --------
-    >>> np.base_repr(3, 5)
-    '3'
-    >>> np.base_repr(6, 5)
-    '11'
-    >>> np.base_repr(7, base=5, padding=3)
-    '00012'
-
-    """
-    if number < 0:
-        raise ValueError("negative numbers not handled in base_repr")
-    if base > 36:
-        raise ValueError("bases greater than 36 not handled in base_repr")
-
-    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-    import math
-    lnb = math.log(base)
-    res = padding*chars[0]
-    if number == 0:
-        return res + chars[0]
-    exponent = int (math.log (number)/lnb)
-    while(exponent >= 0):
-        term = long(base)**exponent
-        lead_digit = int(number / term)
-        res += chars[lead_digit]
-        number -= term*lead_digit
-        exponent -= 1
-    return res
-
-from cPickle import load, loads
-_cload = load
-_file = file
-
-def load(file):
-    """Wrapper around cPickle.load which accepts either a file-like object or
-    a filename.
-    """
-    if isinstance(file, type("")):
-        file = _file(file,"rb")
-    return _cload(file)
-
-# These are all essentially abbreviations
-# These might wind up in a special abbreviations module
-
-def _maketup(descr, val):
-    dt = dtype(descr)
-    # Place val in all scalar tuples:
-    fields = dt.fields
-    if fields is None:
-        return val
-    else:
-        res = [_maketup(fields[name][0],val) for name in dt.names]
-        return tuple(res)
-
-def ones(shape, dtype=None, order='C'):
-    """
-    Return a new array of given shape and type, filled with ones.
-
-    Please refer to the documentation for `zeros`.
-
-    See Also
-    --------
-    zeros
-
-    Examples
-    --------
-    >>> np.ones(5)
-    array([ 1.,  1.,  1.,  1.,  1.])
-
-    >>> np.ones((5,), dtype=np.int)
-    array([1, 1, 1, 1, 1])
-
-    >>> np.ones((2, 1))
-    array([[ 1.],
-           [ 1.]])
-
-    >>> s = (2,2)
-    >>> np.ones(s)
-    array([[ 1.,  1.],
-           [ 1.,  1.]])
-
-    """
-    a = empty(shape, dtype, order)
-    try:
-        a.fill(1)
-        # Above is faster now after addition of fast loops.
-        #a = zeros(shape, dtype, order)
-        #a+=1
-    except TypeError:
-        obj = _maketup(dtype, 1)
-        a.fill(obj)
-    return a
-
-def identity(n, dtype=None):
-    """
-    Return the identity array.
-
-    The identity array is a square array with ones on
-    the main diagonal.
-
-    Parameters
-    ----------
-    n : int
-        Number of rows (and columns) in `n` x `n` output.
-    dtype : data-type, optional
-        Data-type of the output.  Defaults to ``float``.
-
-    Returns
-    -------
-    out : ndarray
-        `n` x `n` array with its main diagonal set to one,
-        and all other elements 0.
-
-    Examples
-    --------
-    >>> np.identity(3)
-    array([[ 1.,  0.,  0.],
-           [ 0.,  1.,  0.],
-           [ 0.,  0.,  1.]])
-
-    """
-    a = zeros((n,n), dtype=dtype)
-    a.flat[::n+1] = 1
-    return a
-
-def allclose(a, b, rtol=1.e-5, atol=1.e-8):
-    """
-    Returns True if two arrays are element-wise equal within a tolerance.
-
-    The tolerance values are positive, typically very small numbers.  The
-    relative difference (`rtol` * abs(`b`)) and the absolute difference
-    `atol` are added together to compare against the absolute difference
-    between `a` and `b`.
-
-    Parameters
-    ----------
-    a, b : array_like
-        Input arrays to compare.
-    rtol : float
-        The relative tolerance parameter (see Notes).
-    atol : float
-        The absolute tolerance parameter (see Notes).
-
-    Returns
-    -------
-    y : bool
-        Returns True if the two arrays are equal within the given
-        tolerance; False otherwise. If either array contains NaN, then
-        False is returned.
-
-    See Also
-    --------
-    all, any, alltrue, sometrue
-
-    Notes
-    -----
-    If the following equation is element-wise True, then allclose returns
-    True.
-
-     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
-
-    Examples
-    --------
-    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
-    False
-    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
-    True
-    >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
-    False
-    >>> np.allclose([1.0, np.nan], [1.0, np.nan])
-    False
-
-    """
-    x = array(a, copy=False)
-    y = array(b, copy=False)
-    xinf = isinf(x)
-    if not all(xinf == isinf(y)):
-        return False
-    if not any(xinf):
-        return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
-    if not all(x[xinf] == y[xinf]):
-        return False
-    x = x[~xinf]
-    y = y[~xinf]
-    return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
-
-def array_equal(a1, a2):
-    """
-    True if two arrays have the same shape and elements, False otherwise.
-
-    Parameters
-    ----------
-    a1, a2 : array_like
-        Input arrays.
-
-    Returns
-    -------
-    b : bool
-        Returns True if the arrays are equal.
-
-    See Also
-    --------
-    allclose: Returns True if two arrays are element-wise equal within a
-              tolerance.
-    array_equiv: Returns True if input arrays are shape consistent and all
-                 elements equal.
-
-    Examples
-    --------
-    >>> np.array_equal([1, 2], [1, 2])
-    True
-    >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
-    True
-    >>> np.array_equal([1, 2], [1, 2, 3])
-    False
-    >>> np.array_equal([1, 2], [1, 4])
-    False
-
-    """
-    try:
-        a1, a2 = asarray(a1), asarray(a2)
-    except:
-        return False
-    if a1.shape != a2.shape:
-        return False
-    return bool(logical_and.reduce(equal(a1,a2).ravel()))
-
-def array_equiv(a1, a2):
-    """
-    Returns True if input arrays are shape consistent and all elements equal.
-
-    Shape consistent means they are either the same shape, or one input array
-    can be broadcasted to create the same shape as the other one.
-
-    Parameters
-    ----------
-    a1, a2 : array_like
-        Input arrays.
-
-    Returns
-    -------
-    out : bool
-        True if equivalent, False otherwise.
-
-    Examples
-    --------
-    >>> np.array_equiv([1, 2], [1, 2])
-    >>> True
-    >>> np.array_equiv([1, 2], [1, 3])
-    >>> False
-
-    Showing the shape equivalence:
-
-    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
-    >>> True
-    >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
-    >>> False
-
-    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
-    >>> False
-
-    """
-    try:
-        a1, a2 = asarray(a1), asarray(a2)
-    except:
-        return False
-    try:
-        return bool(logical_and.reduce(equal(a1,a2).ravel()))
-    except ValueError:
-        return False
-
-
-_errdict = {"ignore":ERR_IGNORE,
-            "warn":ERR_WARN,
-            "raise":ERR_RAISE,
-            "call":ERR_CALL,
-            "print":ERR_PRINT,
-            "log":ERR_LOG}
-
-_errdict_rev = {}
-for key in _errdict.keys():
-    _errdict_rev[_errdict[key]] = key
-del key
-
-def seterr(all=None, divide=None, over=None, under=None, invalid=None):
-    """
-    Set how floating-point errors are handled.
-
-    Note that operations on integer scalar types (such as `int16`) are
-    handled like floating point, and are affected by these settings.
-
-    Parameters
-    ----------
-    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
-        Set treatment for all types of floating-point errors at once:
-
-        - ignore: Take no action when the exception occurs.
-        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
-        - raise: Raise a `FloatingPointError`.
-        - call: Call a function specified using the `seterrcall` function.
-        - print: Print a warning directly to ``stdout``.
-        - log: Record error in a Log object specified by `seterrcall`.
-
-        The default is not to change the current behavior.
-    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
-        Treatment for division by zero.
-    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
-        Treatment for floating-point overflow.
-    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
-        Treatment for floating-point underflow.
-    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
-        Treatment for invalid floating-point operation.
-
-    Returns
-    -------
-    old_settings : dict
-        Dictionary containing the old settings.
-
-    See also
-    --------
-    seterrcall : set a callback function for the 'call' mode.
-    geterr, geterrcall
-
-    Notes
-    -----
-    The floating-point exceptions are defined in the IEEE 754 standard [1]:
-
-    - Division by zero: infinite result obtained from finite numbers.
-    - Overflow: result too large to be expressed.
-    - Underflow: result so close to zero that some precision
-      was lost.
-    - Invalid operation: result is not an expressible number, typically
-      indicates that a NaN was produced.
-
-    .. [1] http://en.wikipedia.org/wiki/IEEE_754
-
-    Examples
-    --------
-    >>> np.seterr(over='raise')
-    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
-     'under': 'ignore'}
-    >>> np.seterr(all='ignore')  # reset to default
-    {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
-
-    >>> np.int16(32000) * np.int16(3)
-    30464
-    >>> old_settings = np.seterr(all='warn', over='raise')
-    >>> np.int16(32000) * np.int16(3)
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in <module>
-    FloatingPointError: overflow encountered in short_scalars
-
-    >>> np.seterr(all='print')
-    {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
-    >>> np.int16(32000) * np.int16(3)
-    Warning: overflow encountered in short_scalars
-    30464
-
-    """
-
-    pyvals = umath.geterrobj()
-    old = geterr()
-
-    if divide is None: divide = all or old['divide']
-    if over is None: over = all or old['over']
-    if under is None: under = all or old['under']
-    if invalid is None: invalid = all or old['invalid']
-
-    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
-                 (_errdict[over] << SHIFT_OVERFLOW ) +
-                 (_errdict[under] << SHIFT_UNDERFLOW) +
-                 (_errdict[invalid] << SHIFT_INVALID))
-
-    pyvals[1] = maskvalue
-    umath.seterrobj(pyvals)
-    return old
-
-
-def geterr():
-    """
-    Get the current way of handling floating-point errors.
-
-    Returns
-    -------
-    res : dict
-        A dictionary with keys "divide", "over", "under", and "invalid",
-        whose values are from the strings "ignore", "print", "log", "warn",
-        "raise", and "call". The keys represent possible floating-point
-        exceptions, and the values define how these exceptions are handled.
-
-    See Also
-    --------
-    geterrcall, seterr, seterrcall
-
-    Notes
-    -----
-    For complete documentation of the types of floating-point exceptions and
-    treatment options, see `seterr`.
-
-    Examples
-    --------
-    >>> np.geterr()  # default is all set to 'ignore'
-    {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
-    'under': 'ignore'}
-    >>> np.arange(3.) / np.arange(3.)
-    array([ NaN,   1.,   1.])
-
-    >>> oldsettings = np.seterr(all='warn', over='raise')
-    >>> np.geterr()
-    {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
-    >>> np.arange(3.) / np.arange(3.)
-    __main__:1: RuntimeWarning: invalid value encountered in divide
-    array([ NaN,   1.,   1.])
-
-    """
-    maskvalue = umath.geterrobj()[1]
-    mask = 7
-    res = {}
-    val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
-    res['divide'] = _errdict_rev[val]
-    val = (maskvalue >> SHIFT_OVERFLOW) & mask
-    res['over'] = _errdict_rev[val]
-    val = (maskvalue >> SHIFT_UNDERFLOW) & mask
-    res['under'] = _errdict_rev[val]
-    val = (maskvalue >> SHIFT_INVALID) & mask
-    res['invalid'] = _errdict_rev[val]
-    return res
-
-def setbufsize(size):
-    """
-    Set the size of the buffer used in ufuncs.
-
-    Parameters
-    ----------
-    size : int
-        Size of buffer.
-
-    """
-    if size > 10e6:
-        raise ValueError, "Buffer size, %s, is too big." % size
-    if size < 5:
-        raise ValueError, "Buffer size, %s, is too small." %size
-    if size % 16 != 0:
-        raise ValueError, "Buffer size, %s, is not a multiple of 16." %size
-
-    pyvals = umath.geterrobj()
-    old = getbufsize()
-    pyvals[0] = size
-    umath.seterrobj(pyvals)
-    return old
-
-def getbufsize():
-    """Return the size of the buffer used in ufuncs.
-    """
-    return umath.geterrobj()[0]
-
-def seterrcall(func):
-    """
-    Set the floating-point error callback function or log object.
-
-    There are two ways to capture floating-point error messages.  The first
-    is to set the error-handler to 'call', using `seterr`.  Then, set
-    the function to call using this function.
-
-    The second is to set the error-handler to 'log', using `seterr`.
-    Floating-point errors then trigger a call to the 'write' method of
-    the provided object.
-
-    Parameters
-    ----------
-    func : callable f(err, flag) or object with write method
-        Function to call upon floating-point errors ('call'-mode) or
-        object whose 'write' method is used to log such message ('log'-mode).
-
-        The call function takes two arguments. The first is the
-        type of error (one of "divide", "over", "under", or "invalid"),
-        and the second is the status flag.  The flag is a byte, whose
-        least-significant bits indicate the status::
-
-          [0 0 0 0 invalid over under invalid]
-
-        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
-
-        If an object is provided, its write method should take one argument,
-        a string.
-
-    Returns
-    -------
-    h : callable or log instance
-        The old error handler.
-
-    See Also
-    --------
-    seterr, geterr, geterrcall
-
-    Examples
-    --------
-    Callback upon error:
-
-    >>> def err_handler(type, flag):
-        print "Floating point error (%s), with flag %s" % (type, flag)
-    ...
-
-    >>> saved_handler = np.seterrcall(err_handler)
-    >>> save_err = np.seterr(all='call')
-
-    >>> np.array([1,2,3])/0.0
-    Floating point error (divide by zero), with flag 1
-    array([ Inf,  Inf,  Inf])
-
-    >>> np.seterrcall(saved_handler)
-    >>> np.seterr(**save_err)
-
-    Log error message:
-
-    >>> class Log(object):
-            def write(self, msg):
-                print "LOG: %s" % msg
-    ...
-
-    >>> log = Log()
-    >>> saved_handler = np.seterrcall(log)
-    >>> save_err = np.seterr(all='log')
-
-    >>> np.array([1,2,3])/0.0
-    LOG: Warning: divide by zero encountered in divide
-
-    >>> np.seterrcall(saved_handler)
-    >>> np.seterr(**save_err)
-
-    """
-    if func is not None and not callable(func):
-        if not hasattr(func, 'write') or not callable(func.write):
-            raise ValueError, "Only callable can be used as callback"
-    pyvals = umath.geterrobj()
-    old = geterrcall()
-    pyvals[2] = func
-    umath.seterrobj(pyvals)
-    return old
-
-def geterrcall():
-    """
-    Return the current callback function used on floating-point errors.
-
-    When the error handling for a floating-point error (one of "divide",
-    "over", "under", or "invalid") is set to 'call' or 'log', the function
-    that is called or the log instance that is written to is returned by
-    `geterrcall`. This function or log instance has been set with
-    `seterrcall`.
-
-    Returns
-    -------
-    errobj : callable, log instance or None
-        The current error handler. If no handler was set through `seterrcall`,
-        ``None`` is returned.
-
-    See Also
-    --------
-    seterrcall, seterr, geterr
-
-    Notes
-    -----
-    For complete documentation of the types of floating-point exceptions and
-    treatment options, see `seterr`.
-
-    Examples
-    --------
-    >>> np.geterrcall()  # we did not yet set a handler, returns None
-
-    >>> oldsettings = np.seterr(all='call')
-    >>> def err_handler(type, flag):
-    ...     print "Floating point error (%s), with flag %s" % (type, flag)
-    >>> oldhandler = np.seterrcall(err_handler)
-    >>> np.array([1,2,3])/0.0
-    Floating point error (divide by zero), with flag 1
-    array([ Inf,  Inf,  Inf])
-    >>> cur_handler = np.geterrcall()
-    >>> cur_handler is err_handler
-    True
-
-    """
-    return umath.geterrobj()[2]
-
-class _unspecified(object):
-    pass
-_Unspecified = _unspecified()
-
-class errstate(object):
-    """with errstate(**state): --> operations in following block use given state.
-
-    # Set error handling to known state.
-    >>> _ = np.seterr(invalid='raise', divide='raise', over='raise',
-    ...               under='ignore')
-
-    >>> a = -np.arange(3)
-    >>> with np.errstate(invalid='ignore'): # doctest: +SKIP
-    ...     print np.sqrt(a)                # with statement requires Python 2.5
-    [ 0.     -1.#IND -1.#IND]
-    >>> print np.sqrt(a.astype(complex))
-    [ 0.+0.j          0.+1.j          0.+1.41421356j]
-    >>> print np.sqrt(a)
-    Traceback (most recent call last):
-     ...
-    FloatingPointError: invalid value encountered in sqrt
-    >>> with np.errstate(divide='ignore'):  # doctest: +SKIP
-    ...     print a/0
-    [0 0 0]
-    >>> print a/0
-    Traceback (most recent call last):
-        ...
-    FloatingPointError: divide by zero encountered in divide
-
-    """
-    # Note that we don't want to run the above doctests because they will fail
-    # without a from __future__ import with_statement
-    def __init__(self, **kwargs):
-        self.call = kwargs.pop('call',_Unspecified)
-        self.kwargs = kwargs
-    def __enter__(self):
-        self.oldstate = seterr(**self.kwargs)
-        if self.call is not _Unspecified:
-            self.oldcall = seterrcall(self.call)
-    def __exit__(self, *exc_info):
-        seterr(**self.oldstate)
-        if self.call is not _Unspecified:
-            seterrcall(self.oldcall)
-
-def _setdef():
-    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
-    umath.seterrobj(defval)
-
-# set the default values
-_setdef()
-
-Inf = inf = infty = Infinity = PINF
-nan = NaN = NAN
-False_ = bool_(False)
-True_ = bool_(True)
-
-import fromnumeric
-from fromnumeric import *
-extend_all(fromnumeric)
diff --git a/cobra/oven/danielhyduke/jython/numpy/core/numerictypes.py b/cobra/oven/danielhyduke/jython/numpy/core/numerictypes.py
deleted file mode 100755
index f8683ba..0000000
--- a/cobra/oven/danielhyduke/jython/numpy/core/numerictypes.py
+++ /dev/null
@@ -1,734 +0,0 @@
-#TODO: convert this to use the cern.colt types.
-"""numerictypes: Define the numeric type objects
-
-This module is designed so 'from numerictypes import *' is safe.
-Exported symbols include:
-
-  Dictionary with all registered number types (including aliases):
-    typeDict
-
-  Type objects (not all will be available, depends on platform):
-      see variable sctypes for which ones you have
-
-    Bit-width names
-
-    int8 int16 int32 int64 int128
-    uint8 uint16 uint32 uint64 uint128
-    float16 float32 float64 float96 float128 float256
-    complex32 complex64 complex128 complex192 complex256 complex512
-    datetime64 timedelta64
-
-    c-based names
-
-    bool_
-
-    object_
-
-    void, str_, unicode_
-
-    byte, ubyte,
-    short, ushort
-    intc, uintc,
-    intp, uintp,
-    int_, uint,
-    longlong, ulonglong,
-
-
-    single, csingle,
-    float_, complex_,
-    longfloat, clongfloat,
-
-    datetime, timedelta,  (these inherit from timeinteger which inherits from signedinteger)
-    
-
-   As part of the type-hierarchy:    xx -- is bit-width
-
-   generic
-     +-> bool_                                  (kind=b)
-     +-> number                                 (kind=i)
-     |     integer
-     |     signedinteger   (intxx)
-     |     byte
-     |     short
-     |     intc
-     |     intp           int0
-     |     int_
-     |     longlong
-     +-> unsignedinteger  (uintxx)              (kind=u)
-     |     ubyte
-     |     ushort
-     |     uintc
-     |     uintp          uint0
-     |     uint_
-     |     ulonglong
-     +-> inexact
-     |   +-> floating           (floatxx)       (kind=f)
-     |   |     single
-     |   |     float_  (double)
-     |   |     longfloat
-     |   \-> complexfloating    (complexxx)     (kind=c)
-     |         csingle  (singlecomplex)
-     |         complex_ (cfloat, cdouble)
-     |         clongfloat (longcomplex)
-     +-> flexible
-     |     character
-     |     str_     (string_)                   (kind=S)
-     |     unicode_                             (kind=U)
-     |     void                                 (kind=V)
-     |
-     \-> object_ (not used much)                (kind=O)
-"""
-
-# we add more at the bottom
-__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
-           'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
-           'maximum_sctype', 'issctype', 'typecodes', 'find_common_type']
-
-#from numpy.core.multiarray import typeinfo, ndarray, array, empty, dtype
-
-#TODO: Create these items and import them correctly
-
-import types as _types
-
-# we don't export these for import *, but we do want them accessible
-# as numerictypes.bool, etc.
-from __builtin__ import bool, int, long, float, complex, object, unicode, str
-
-# String-handling utilities to avoid locale-dependence.
-
-# "import string" is costly to import!
-# Construct the translation tables directly
-#   "A" = chr(65), "a" = chr(97)
-_all_chars = map(chr, range(256))
-_ascii_upper = _all_chars[65:65+26]
-_ascii_lower = _all_chars[97:97+26]
-LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
-UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
-
-#import string
-# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
-#          LOWER_TABLE)
-# assert (string.maketrnas(string_ascii_lowercase, string.ascii_uppercase) == \
-#          UPPER_TABLE)
-#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
-#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
-
-def english_lower(s):
-    """ Apply English case rules to convert ASCII strings to all lower case.
-
-    This is an internal utility function to replace calls to str.lower() such
-    that we can avoid changing behavior with changing locales. In particular,
-    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
-    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
-
-    Parameters
-    ----------
-    s : str
-
-    Returns
-    -------
-    lowered : str
-
-    Examples
-    --------
-    >>> from numpy.core.numerictypes import english_lower
-    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
-    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
-    >>> english_lower('')
-    ''
-    """
-    lowered = s.translate(LOWER_TABLE)
-    return lowered
-
-def english_upper(s):
-    """ Apply English case rules to convert ASCII strings to all upper case.
-
-    This is an internal utility function to replace calls to str.upper() such
-    that we can avoid changing behavior with changing locales. In particular,
-    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
-    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
-
-    Parameters
-    ----------
-    s : str
-
-    Returns
-    -------
-    uppered : str
-
-    Examples
-    --------
-    >>> from numpy.core.numerictypes import english_upper
-    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
-    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
-    >>> english_upper('')
-    ''
-    """
-    uppered = s.translate(UPPER_TABLE)
-    return uppered
-
-def english_capitalize(s):
-    """ Apply English case rules to convert the first character of an ASCII
-    string to upper case.
-
-    This is an internal utility function to replace calls to str.capitalize()
-    such that we can avoid changing behavior with changing locales.
-
-    Parameters
-    ----------
-    s : str
-
-    Returns
-    -------
-    capitalized : str
-
-    Examples
-    --------
-    >>> from numpy.core.numerictypes import english_capitalize
-    >>> english_capitalize('int8')
-    'Int8'
-    >>> english_capitalize('Int8')
-    'Int8'
-    >>> english_capitalize('')
-    ''
-    """
-    if s:
-        return english_upper(s[0]) + s[1:]
-    else:
-        return s
-
-
-sctypeDict = {}      # Contains all leaf-node scalar types with aliases
-sctypeNA = {}        # Contails all leaf-node types -> numarray type equivalences
-allTypes = {}      # Collect the types we will add to the module here
-
-def _evalname(name):
-    k = 0
-    for ch in name:
-        if ch in '0123456789':
-            break
-        k += 1
-    try:
-        bits = int(name[k:])
-    except ValueError:
-        bits = 0
-    base = name[:k]
-    return base, bits
-
-def bitname(obj):
-    """Return a bit-width name for a given type object"""
-    name = obj.__name__
-    base = ''
-    char = ''
-    try:
-        if name[-1] == '_':
-            newname = name[:-1]
-        else:
-            newname = name
-        info = typeinfo[english_upper(newname)]
-        assert(info[-1] == obj)  # sanity check
-        bits = info[2]
-
-    except KeyError:     # bit-width name
-        base, bits = _evalname(name)
-        char = base[0]
-
-    if name == 'bool_':
-        char = 'b'
-        base = 'bool'
-    elif name=='string_':
-        char = 'S'
-        base = 'string'
-    elif name=='unicode_':
-        char = 'U'
-        base = 'unicode'
-    elif name=='void':
-        char = 'V'
-        base = 'void'
-    elif name=='object_':
-        char = 'O'
-        base = 'object'
-        bits = 0
-
-    bytes = bits / 8
-
-    if char != '' and bytes != 0:
-        char = "%s%d" % (char, bytes)
-
-    return base, bits, char
-
-
-def _add_types():
-    for a in typeinfo.keys():
-        name = english_lower(a)
-        if isinstance(typeinfo[a], tuple):
-            typeobj = typeinfo[a][-1]
-
-            # define C-name and insert typenum and typechar references also
-            allTypes[name] = typeobj
-            sctypeDict[name] = typeobj
-            sctypeDict[typeinfo[a][0]] = typeobj
-            sctypeDict[typeinfo[a][1]] = typeobj
-
-        else:  # generic class
-            allTypes[name] = typeinfo[a]
-_add_types()
-
-def _add_aliases():
-    for a in typeinfo.keys():
-        name = english_lower(a)
-        if not isinstance(typeinfo[a], tuple):
-            continue
-        typeobj = typeinfo[a][-1]
-        # insert bit-width version for this class (if relevant)
-        base, bit, char = bitname(typeobj)
-        if base[-3:] == 'int' or char[0] in 'ui': continue
-        if base != '':
-            myname = "%s%d" % (base, bit)
-            if (name != 'longdouble' and name != 'clongdouble') or \
-                   myname not in allTypes.keys():
-                allTypes[myname] = typeobj
-                sctypeDict[myname] = typeobj
-                if base == 'complex':
-                    na_name = '%s%d' % (english_capitalize(base), bit/2)
-                elif base == 'bool':
-                    na_name = english_capitalize(base)
-                    sctypeDict[na_name] = typeobj
-                else:
-                    na_name = "%s%d" % (english_capitalize(base), bit)
-                    sctypeDict[na_name] = typeobj
-                sctypeNA[na_name] = typeobj
-                sctypeDict[na_name] = typeobj
-                sctypeNA[typeobj] = na_name
-                sctypeNA[typeinfo[a][0]] = na_name
-        if char != '':
-            sctypeDict[char] = typeobj
-            sctypeNA[char] = na_name
-_add_aliases()
-
-# Integers handled so that
-# The int32, int64 types should agree exactly with
-#  PyArray_INT32, PyArray_INT64 in C
-# We need to enforce the same checking as is done
-#  in arrayobject.h where the order of getting a
-#  bit-width match is:
-#       long, longlong, int, short, char
-#   for int8, int16, int32, int64, int128
-
-def _add_integer_aliases():
-    _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
-    for ctype in _ctypes:
-        val = typeinfo[ctype]
-        bits = val[2]
-        charname = 'i%d' % (bits/8,)
-        ucharname = 'u%d' % (bits/8,)
-        intname = 'int%d' % bits
-        UIntname = 'UInt%d' % bits
-        Intname = 'Int%d' % bits
-        uval = typeinfo['U'+ctype]
-        typeobj = val[-1]
-        utypeobj = uval[-1]
-        if intname not in allTypes.keys():
-            uintname = 'uint%d' % bits
-            allTypes[intname] = typeobj
-            allTypes[uintname] = utypeobj
-            sctypeDict[intname] = typeobj
-            sctypeDict[uintname] = utypeobj
-            sctypeDict[Intname] = typeobj
-            sctypeDict[UIntname] = utypeobj
-            sctypeDict[charname] = typeobj
-            sctypeDict[ucharname] = utypeobj
-            sctypeNA[Intname] = typeobj
-            sctypeNA[UIntname] = utypeobj
-            sctypeNA[charname] = typeobj
-            sctypeNA[ucharname] = utypeobj
-        sctypeNA[typeobj] = Intname
-        sctypeNA[utypeobj] = UIntname
-        sctypeNA[val[0]] = Intname
-        sctypeNA[uval[0]] = UIntname
-_add_integer_aliases()
-
-# We use these later
-void = allTypes['void']
-generic = allTypes['generic']
-
-#
-# Rework the Python names (so that float and complex and int are consistent
-#                            with Python usage)
-#
-def _set_up_aliases():
-    type_pairs = [('complex_', 'cdouble'),
-                  ('int0', 'intp'),
-                  ('uint0', 'uintp'),
-                  ('single', 'float'),
-                  ('csingle', 'cfloat'),
-                  ('singlecomplex', 'cfloat'),
-                  ('float_', 'double'),
-                  ('intc', 'int'),
-                  ('uintc', 'uint'),
-                  ('int_', 'long'),
-                  ('uint', 'ulong'),
-                  ('cfloat', 'cdouble'),
-                  ('longfloat', 'longdouble'),
-                  ('clongfloat', 'clongdouble'),
-                  ('longcomplex', 'clongdouble'),
-                  ('bool_', 'bool'),
-                  ('unicode_', 'unicode'),
-                  ('str_', 'string'),
-                  ('string_', 'string'),
-                  ('object_', 'object')]
-    for alias, t in type_pairs:
-        allTypes[alias] = allTypes[t]
-        sctypeDict[alias] = sctypeDict[t]
-    # Remove aliases overriding python types and modules
-    for t in ['ulong', 'object', 'unicode', 'int', 'long', 'float',
-              'complex', 'bool', 'string']:
-        try:
-            del allTypes[t]
-            del sctypeDict[t]
-        except KeyError:
-            pass
-_set_up_aliases()
-
-# Now, construct dictionary to lookup character codes from types
-_sctype2char_dict = {}
-def _construct_char_code_lookup():
-    for name in typeinfo.keys():
-        tup = typeinfo[name]
-        if isinstance(tup, tuple):
-            if tup[0] not in ['p','P']:
-                _sctype2char_dict[tup[-1]] = tup[0]
-_construct_char_code_lookup()
-
-
-sctypes = {'int': [],
-           'uint':[],
-           'float':[],
-           'complex':[],
-           'others':[bool,object,str,unicode,void]}
-
-def _add_array_type(typename, bits):
-    try:
-        t = allTypes['%s%d' % (typename, bits)]
-    except KeyError:
-        pass
-    else:
-        sctypes[typename].append(t)
-
-def _set_array_types():
-    ibytes = [1, 2, 4, 8, 16, 32, 64]
-    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
-    for bytes in ibytes:
-        bits = 8*bytes
-        _add_array_type('int', bits)
-        _add_array_type('uint', bits)
-    for bytes in fbytes:
-        bits = 8*bytes
-        _add_array_type('float', bits)
-        _add_array_type('complex', 2*bits)
-    _gi = dtype('p')
-    if _gi.type not in sctypes['int']:
-        indx = 0
-        sz = _gi.itemsize
-        _lst = sctypes['int']
-        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
-            indx += 1
-        sctypes['int'].insert(indx, _gi.type)
-        sctypes['uint'].insert(indx, dtype('P').type)
-_set_array_types()
-
-
-genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
-                   'int32', 'uint32', 'int64', 'uint64', 'int128',
-                   'uint128', 'float16',
-                   'float32', 'float64', 'float80', 'float96', 'float128',
-                   'float256',
-                   'complex32', 'complex64', 'complex128', 'complex160',
-                   'complex192', 'complex256', 'complex512', 'object']
-
-def maximum_sctype(t):
-    """returns the sctype of highest precision of the same general kind as 't'"""
-    g = obj2sctype(t)
-    if g is None:
-        return t
-    t = g
-    name = t.__name__
-    base, bits = _evalname(name)
-    if bits == 0:
-        return t
-    else:
-        return sctypes[base][-1]
-
-_python_types = {int : 'int_',
-                 float: 'float_',
-                 complex: 'complex_',
-                 bool: 'bool_',
-                 str: 'string_',
-                 unicode: 'unicode_',
-                 _types.BufferType: 'void',
-                }
-def _python_type(t):
-    """returns the type corresponding to a certain Python type"""
-    if not isinstance(t, _types.TypeType):
-        t = type(t)
-    return allTypes[_python_types.get(t, 'object_')]
-
-def issctype(rep):
-    """Determines whether the given object represents
-    a numeric array type."""
-    if not isinstance(rep, (type, dtype)):
-        return False
-    try:
-        res = obj2sctype(rep)
-        if res and res != object_:
-            return True
-        return False
-    except:
-        return False
-
-def obj2sctype(rep, default=None):
-    try:
-        if issubclass(rep, generic):
-            return rep
-    except TypeError:
-        pass
-    if isinstance(rep, dtype):
-        return rep.type
-    if isinstance(rep, type):
-        return _python_type(rep)
-    if isinstance(rep, ndarray):
-        return rep.dtype.type
-    try:
-        res = dtype(rep)
-    except:
-        return default
-    return res.type
-
-
-def issubclass_(arg1, arg2):
-    try:
-        return issubclass(arg1, arg2)
-    except TypeError:
-        return False
-
-def issubsctype(arg1, arg2):
-    return issubclass(obj2sctype(arg1), obj2sctype(arg2))
-
-def issubdtype(arg1, arg2):
-    """
-    Returns True if first argument is a typecode lower/equal in type hierarchy.
-
-    Parameters
-    ----------
-    arg1 : dtype_like
-        dtype or string representing a typecode.
-    arg2 : dtype_like
-        dtype or string representing a typecode.
-
-    Returns
-    -------
-    out : bool
-
-    See Also
-    --------
-    numpy.core.numerictypes : Overview of numpy type hierarchy.
-
-    Examples
-    --------
-    >>> np.issubdtype('S1', str)
-    True
-    >>> np.issubdtype(np.float64, np.float32)
-    False
-
-    """
-    if issubclass_(arg2, generic):
-        return issubclass(dtype(arg1).type, arg2)
-    mro = dtype(arg2).type.mro()
-    if len(mro) > 1:
-        val = mro[1]
-    else:
-        val = mro[0]
-    return issubclass(dtype(arg1).type, val)
-
-
-# This dictionary allows look up based on any alias for an array data-type
-class _typedict(dict):
-    def __getitem__(self, obj):
-        return dict.__getitem__(self, obj2sctype(obj))
-
-nbytes = _typedict()
-_alignment = _typedict()
-_maxvals = _typedict()
-_minvals = _typedict()
-def _construct_lookups():
-    for name, val in typeinfo.iteritems():
-        if not isinstance(val, tuple):
-            continue
-        obj = val[-1]
-        nbytes[obj] = val[2] / 8
-        _alignment[obj] = val[3]
-        if (len(val) > 5):
-            _maxvals[obj] = val[4]
-            _minvals[obj] = val[5]
-        else:
-            _maxvals[obj] = None
-            _minvals[obj] = None
-
-_construct_lookups()
-
-def sctype2char(sctype):
-    sctype = obj2sctype(sctype)
-    if sctype is None:
-        raise ValueError, "unrecognized type"
-    return _sctype2char_dict[sctype]
-
-# Create dictionary of casting functions that wrap sequences
-# indexed by type or type character
-
-
-cast = _typedict()
-ScalarType = [_types.IntType, _types.FloatType,
-              _types.ComplexType, _types.LongType, _types.BooleanType,
-              _types.StringType, _types.UnicodeType, _types.BufferType]
-ScalarType.extend(_sctype2char_dict.keys())
-ScalarType = tuple(ScalarType)
-for key in _sctype2char_dict.keys():
-    cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
-
-# Create the typestring lookup dictionary
-_typestr = _typedict()
-for key in _sctype2char_dict.keys():
-    if issubclass(key, allTypes['flexible']):
-        _typestr[key] = _sctype2char_dict[key]
-    else:
-        _typestr[key] = empty((1,),key).dtype.str[1:]
-
-# Make sure all typestrings are in sctypeDict
-for key, val in _typestr.items():
-    if val not in sctypeDict:
-        sctypeDict[val] = key
-
-# Add additional strings to the sctypeDict
-
-_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', ('str', allTypes['string_']),
-          'unicode', 'object', ('a', allTypes['string_'])]
-
-for name in _toadd:
-    if isinstance(name, tuple):
-        sctypeDict[name[0]] = name[1]
-    else:
-        sctypeDict[name] = allTypes['%s_' % name]
-
-del _toadd, name
-
-# Now add the types we've determined to this module
-for key in allTypes:
-    globals()[key] = allTypes[key]
-    __all__.append(key)
-
-del key
-
-typecodes = {'Character':'c',
-             'Integer':'bhilqp',
-             'UnsignedInteger':'BHILQP',
-             'Float':'fdg',
-             'Complex':'FDG',
-             'AllInteger':'bBhHiIlLqQpP',
-             'AllFloat':'fdgFDG',
-             'Datetime': 'Mm',
-             'All':'?bhilqpBHILQPfdgFDGSUVOMm'}
-
-# backwards compatibility --- deprecated name
-typeDict = sctypeDict
-typeNA = sctypeNA
-
-# b -> boolean
-# u -> unsigned integer
-# i -> signed integer
-# f -> floating point
-# c -> complex
-# M -> datetime
-# m -> timedelta
-# S -> string
-# U -> Unicode string
-# V -> record
-# O -> Python object
-_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
-
-__test_types = typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
-__len_test_types = len(__test_types)
-
-# Keep incrementing until a common type both can be coerced to
-#  is found.  Otherwise, return None
-def _find_common_coerce(a, b):
-    if a > b:
-        return a
-    try:
-        thisind = __test_types.index(a.char)
-    except ValueError:
-        return None
-    return _can_coerce_all([a,b], start=thisind)
-
-# Find a data-type that all data-types in a list can be coerced to
-def _can_coerce_all(dtypelist, start=0):
-    N = len(dtypelist)
-    if N == 0:
-        return None
-    if N == 1:
-        return dtypelist[0]
-    thisind = start
-    while thisind < __len_test_types:
-        newdtype = dtype(__test_types[thisind])
-        numcoerce = len([x for x in dtypelist if newdtype >= x])
-        if numcoerce == N:
-            return newdtype
-        thisind += 1
-    return None
-
-def find_common_type(array_types, scalar_types):
-    """
-    Determine common type following standard coercion rules
-
-    Parameters
-    ----------
-    array_types : sequence
-        A list of dtype convertible objects representing arrays
-    scalar_types : sequence
-        A list of dtype convertible objects representing scalars
-
-    Returns
-    -------
-    datatype : dtype
-        The common data-type which is the maximum of the array_types
-        ignoring the scalar_types unless the maximum of the scalar_types
-        is of a different kind.
-
-        If the kinds is not understood, then None is returned.
-
-    See Also
-    --------
-    dtype
-
-    """
-    array_types = [dtype(x) for x in array_types]
-    scalar_types = [dtype(x) for x in scalar_types]
-
-    maxa = _can_coerce_all(array_types)
-    maxsc = _can_coerce_all(scalar_types)
-
-    if maxa is None:
-        return maxsc
-
-    if maxsc is None:
-        return maxa
-
-    try:
-        index_a = _kind_list.index(maxa.kind)
-        index_sc = _kind_list.index(maxsc.kind)
-    except ValueError:
-        return None
-
-    if index_sc > index_a:
-        return _find_common_coerce(maxsc,maxa)
-    else:
-        return maxa
diff --git a/cobra/oven/danielhyduke/jython/scipy/README b/cobra/oven/danielhyduke/jython/scipy/README
deleted file mode 100644
index 22f7815..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/README
+++ /dev/null
@@ -1 +0,0 @@
-In the future, we will be working on a scipy for java implementation that uses cern.colt matrices as the backend and provides a java interface that mirrors scipy.
diff --git a/cobra/oven/danielhyduke/jython/scipy/__init__.py b/cobra/oven/danielhyduke/jython/scipy/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/__init__.py b/cobra/oven/danielhyduke/jython/scipy/sparse/__init__.py
deleted file mode 100644
index 6ea4aa3..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"Sparse Matrix Support"
-
-from info import __doc__
-
-from base import *
-from csr import *
-from csc import *
-from lil import *
-from dok import *
-from coo import *
-from dia import *
-from bsr import *
-
-from construct import *
-from extract import *
-
-#from spfuncs import *
-
-__all__ = filter(lambda s:not s.startswith('_'),dir())
-from numpy.testing import Tester
-test = Tester().test
-bench = Tester().bench
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/base.py b/cobra/oven/danielhyduke/jython/scipy/sparse/base.py
deleted file mode 100644
index cc9311a..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/base.py
+++ /dev/null
@@ -1,615 +0,0 @@
-"""Base class for sparse matrices"""
-
-__all__ = ['spmatrix', 'isspmatrix', 'issparse',
-        'SparseWarning','SparseEfficiencyWarning']
-
-from warnings import warn
-
-import numjy as np
-
-from sputils import isdense, isscalarlike, isintlike
-
-
-class SparseWarning(Warning): pass
-class SparseFormatWarning(SparseWarning): pass
-class SparseEfficiencyWarning(SparseWarning): pass
-
-
-# The formats that we might potentially understand.
-_formats = {'csc':[0, "Compressed Sparse Column"],
-            'csr':[1, "Compressed Sparse Row"],
-            'dok':[2, "Dictionary Of Keys"],
-            'lil':[3, "LInked List"],
-            'dod':[4, "Dictionary of Dictionaries"],
-            'sss':[5, "Symmetric Sparse Skyline"],
-            'coo':[6, "COOrdinate"],
-            'lba':[7, "Linpack BAnded"],
-            'egd':[8, "Ellpack-itpack Generalized Diagonal"],
-            'dia':[9, "DIAgonal"],
-            'bsr':[10, "Block Sparse Row"],
-            'msr':[11, "Modified compressed Sparse Row"],
-            'bsc':[12, "Block Sparse Column"],
-            'msc':[13, "Modified compressed Sparse Column"],
-            'ssk':[14, "Symmetric SKyline"],
-            'nsk':[15, "Nonsymmetric SKyline"],
-            'jad':[16, "JAgged Diagonal"],
-            'uss':[17, "Unsymmetric Sparse Skyline"],
-            'vbr':[18, "Variable Block Row"],
-            'und':[19, "Undefined"]
-            }
-
-
-MAXPRINT = 50
-
-class spmatrix(object):
-    """ This class provides a base class for all sparse matrices.  It
-    cannot be instantiated.  Most of the work is provided by subclasses.
-    """
-
-    __array_priority__ = 10.1
-    ndim = 2
-    def __init__(self, maxprint=MAXPRINT):
-        self.format = self.__class__.__name__[:3]
-        self._shape = None
-        if self.format == 'spm':
-            raise ValueError, "This class is not intended" \
-                  " to be instantiated directly."
-        self.maxprint = maxprint
-
-    def set_shape(self,shape):
-        shape = tuple(shape)
-
-        if len(shape) != 2:
-            raise ValueError("Only two-dimensional sparse arrays "
-                                     "are supported.")
-        try:
-            shape = int(shape[0]),int(shape[1]) #floats, other weirdness
-        except:
-            raise TypeError('invalid shape')
-
-        if not (shape[0] >= 1 and shape[1] >= 1):
-            raise ValueError('invalid shape')
-
-        if (self._shape != shape) and (self._shape is not None):
-            try:
-                self = self.reshape(shape)
-            except NotImplementedError:
-                raise NotImplementedError("Reshaping not implemented for %s." %
-                                          self.__class__.__name__)
-        self._shape = shape
-
-    def get_shape(self):
-        return self._shape
-
-    shape = property(fget=get_shape, fset=set_shape)
-
-    def reshape(self,shape):
-        raise NotImplementedError
-
-    def astype(self, t):
-        return self.tocsr().astype(t).asformat(self.format)
-
-    def asfptype(self):
-        """Upcast matrix to a floating point format (if necessary)"""
-
-        fp_types = ['f','d','F','D']
-
-        if self.dtype.char in fp_types:
-            return self
-        else:
-            for fp_type in fp_types:
-                if self.dtype <= np.dtype(fp_type):
-                    return self.astype(fp_type)
-
-            raise TypeError,'cannot upcast [%s] to a floating \
-                             point format' % self.dtype.name
-
-    def __iter__(self):
-        for r in xrange(self.shape[0]):
-            yield self[r,:]
-
-    def getmaxprint(self):
-        try:
-            maxprint = self.maxprint
-        except AttributeError:
-            maxprint = MAXPRINT
-        return maxprint
-
-    #def typecode(self):
-    #    try:
-    #        typ = self.dtype.char
-    #    except AttributeError:
-    #        typ = None
-    #    return typ
-
-    def getnnz(self):
-        try:
-            return self.nnz
-        except AttributeError:
-            raise AttributeError, "nnz not defined"
-
-    def getformat(self):
-        try:
-            format = self.format
-        except AttributeError:
-            format = 'und'
-        return format
-
-    @np.deprecate
-    def rowcol(self, num):
-        return (None, None)
-
-    @np.deprecate
-    def getdata(self, num):
-        return None
-
-    @np.deprecate
-    def listprint(self, start, stop):
-        """Provides a way to print over a single index.
-        """
-        return '\n'.join(['  %s\t%s' % (self.rowcol(ind), self.getdata(ind))
-                         for ind in xrange(start,stop)]) + '\n'
-
-    def __repr__(self):
-        nnz = self.getnnz()
-        format = self.getformat()
-        return "<%dx%d sparse matrix of type '%s'\n" \
-               "\twith %d stored elements in %s format>" % \
-               (self.shape + (self.dtype.type, nnz, _formats[format][1]))
-
-    def __str__(self):
-        maxprint = self.getmaxprint()
-
-        A   = self.tocoo()
-        nnz = self.getnnz()
-
-        # helper function, outputs "(i,j)  v"
-        def tostr(row,col,data):
-            triples = zip(zip(row,col),data)
-            return '\n'.join( [ ('  %s\t%s' % t) for t in triples] )
-
-        if nnz > maxprint:
-            half = maxprint // 2
-            out  = tostr(A.row[:half], A.col[:half], A.data[:half])
-            out += "\n  :\t:\n"
-            half = maxprint - maxprint//2
-            out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
-        else:
-            out  = tostr(A.row, A.col, A.data)
-
-        return out
-
-    def __nonzero__(self):  # Simple -- other ideas?
-        return self.getnnz() > 0
-
-    # What should len(sparse) return? For consistency with dense matrices,
-    # perhaps it should be the number of rows?  But for some uses the number of
-    # non-zeros is more important.  For now, raise an exception!
-    def __len__(self):
-        # return self.getnnz()
-        raise TypeError, "sparse matrix length is ambiguous; use getnnz()" \
-                         " or shape[0]"
-
-    def asformat(self, format):
-        """Return this matrix in a given sparse format
-
-        Parameters
-        ----------
-        format : {string, None}
-            desired sparse matrix format
-                - None for no format conversion
-                - "csr" for csr_matrix format
-                - "csc" for csc_matrix format
-                - "lil" for lil_matrix format
-                - "dok" for dok_matrix format and so on
-
-        """
-
-        if format is None or format == self.format:
-            return self
-        else:
-            return getattr(self,'to' + format)()
-
-    ###################################################################
-    #  NOTE: All arithmetic operations use csr_matrix by default.
-    # Therefore a new sparse matrix format just needs to define a
-    # .tocsr() method to provide arithmetic support.  Any of these
-    # methods can be overridden for efficiency.
-    ####################################################################
-
-    def multiply(self, other):
-        """Point-wise multiplication by another matrix
-        """
-        return self.tocsr().multiply(other)
-
-    def __abs__(self):
-        return abs(self.tocsr())
-
-    def __add__(self, other):   # self + other
-        return self.tocsr().__add__(other)
-
-    def __radd__(self, other):  # other + self
-        return self.tocsr().__radd__(other)
-
-    def __sub__(self, other):   # self - other
-        #note: this can't be replaced by self + (-other) for unsigned types
-        return self.tocsr().__sub__(other)
-
-    def __rsub__(self, other):  # other - self
-        return self.tocsr().__rsub__(other)
-
-    # old __mul__ interfaces
-    @np.deprecate
-    def matvec(self,other):
-        return self * other
-
-    @np.deprecate
-    def matmat(self,other):
-        return self * other
-
-    @np.deprecate
-    def dot(self, other):
-        return self * other
-
-    @np.deprecate
-    def rmatvec(self, other, conjugate=True):
-        """Multiplies the vector 'other' by the sparse matrix, returning a
-        dense vector as a result.
-
-        If 'conjugate' is True:
-            - returns A.transpose().conj() * other
-        Otherwise:
-            - returns A.transpose() * other.
-
-        """
-        if conjugate:
-            return self.conj().transpose() * other
-        else:
-            return self.transpose() * other
-
-    def __mul__(self, other):
-        """interpret other and call one of the following
-
-        self._mul_scalar()
-        self._mul_vector()
-        self._mul_multivector()
-        self._mul_sparse_matrix()
-        """
-
-        M,N = self.shape
-
-        if isscalarlike(other):
-            # scalar value
-            return self._mul_scalar(other)
-
-        if issparse(other):
-            if self.shape[1] != other.shape[0]:
-                raise ValueError('dimension mismatch')
-            return self._mul_sparse_matrix(other)
-
-        try:
-            other.shape
-        except AttributeError:
-            # If it's a list or whatever, treat it like a matrix
-            other = np.asanyarray(other)
-
-        other = np.asanyarray(other)
-
-        if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
-            # dense row or column vector
-            if other.shape != (N,) and other.shape != (N,1):
-                raise ValueError('dimension mismatch')
-
-            result = self._mul_vector(np.ravel(other))
-
-            if isinstance(other, np.matrix):
-                result = np.asmatrix(result)
-
-            if other.ndim == 2 and other.shape[1] == 1:
-                # If 'other' was an (nx1) column vector, reshape the result
-                result = result.reshape(-1,1)
-
-            return result
-
-        elif other.ndim == 2:
-            ##
-            # dense 2D array or matrix ("multivector")
-
-            if other.shape[0] != self.shape[1]:
-                raise ValueError('dimension mismatch')
-
-            result = self._mul_multivector(np.asarray(other))
-
-            if isinstance(other, np.matrix):
-                result = np.asmatrix(result)
-
-            return result
-        else:
-            raise ValueError('could not interpret dimensions')
-
-    # by default, use CSR for __mul__ handlers
-    def _mul_scalar(self, other):
-        return self.tocsr()._mul_scalar(other)
-
-    def _mul_vector(self, other):
-        return self.tocsr()._mul_vector(other)
-
-    def _mul_multivector(self, other):
-        return self.tocsr()._mul_multivector(other)
-
-    def _mul_sparse_matrix(self, other):
-        return self.tocsr()._mul_sparse_matrix(other)
-
-    def __rmul__(self, other): # other * self
-        if isscalarlike(other):
-            return self.__mul__(other)
-        else:
-            # Don't use asarray unless we have to
-            try:
-                tr = other.transpose()
-            except AttributeError:
-                tr = np.asarray(other).transpose()
-            return (self.transpose() * tr).transpose()
-
-    ####################
-    # Other Arithmetic #
-    ####################
-
-    def __truediv__(self, other):
-        if isscalarlike(other):
-            return self * (1./other)
-        else:
-            return self.tocsr().__truediv__(other)
-
-    def __div__(self, other):
-        # Always do true division
-        return self.__truediv__(other)
-
-    def __neg__(self):
-        return -self.tocsr()
-
-    def __iadd__(self, other):
-        raise NotImplementedError
-
-    def __isub__(self, other):
-        raise NotImplementedError
-
-    def __imul__(self, other):
-        raise NotImplementedError
-
-    def __idiv__(self, other):
-        return self.__itruediv__(other)
-
-    def __itruediv__(self, other):
-        raise NotImplementedError
-
-    def __pow__(self, other):
-        if self.shape[0] != self.shape[1]:
-            raise TypeError('matrix is not square')
-
-        if isintlike(other):
-            other = int(other)
-            if other < 0:
-                raise ValueError('exponent must be >= 0')
-
-            if other == 0:
-                from construct import identity
-                return identity( self.shape[0], dtype=self.dtype )
-            elif other == 1:
-                return self.copy()
-            else:
-                result = self
-                for i in range(1,other):
-                    result = result*self
-                return result
-        elif isscalarlike(other):
-            raise ValueError('exponent must be an integer')
-        elif isspmatrix(other):
-            warn('Using ** for elementwise multiplication is deprecated.'\
-                    'Use .multiply() instead', DeprecationWarning)
-            return self.multiply(other)
-        else:
-            raise NotImplementedError
-
-
-    def __getattr__(self, attr):
-        if attr == 'A':
-            return self.toarray()
-        elif attr == 'T':
-            return self.transpose()
-        elif attr == 'H':
-            return self.getH()
-        elif attr == 'real':
-            return self._real()
-        elif attr == 'imag':
-            return self._imag()
-        elif attr == 'size':
-            return self.getnnz()
-        else:
-            raise AttributeError, attr + " not found"
-
-    def transpose(self):
-        return self.tocsr().transpose()
-
-    def conj(self):
-        return self.tocsr().conj()
-
-    def conjugate(self):
-        return self.conj()
-
-    # Renamed conjtranspose() -> getH() for compatibility with dense matrices
-    def getH(self):
-        return self.transpose().conj()
-
-    def _real(self):
-        return self.tocsr()._real()
-
-    def _imag(self):
-        return self.tocsr()._imag()
-
-
-    def nonzero(self):
-        """nonzero indices
-
-        Returns a tuple of arrays (row,col) containing the indices
-        of the non-zero elements of the matrix.
-
-        Example
-        -------
-
-        >>> from scipy.sparse import csr_matrix
-        >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
-        >>> A.nonzero()
-        (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
-
-        """
-
-        # convert to COOrdinate format
-        A = self.tocoo()
-        nz_mask = A.data != 0
-        return (A.row[nz_mask],A.col[nz_mask])
-
-
-    def getcol(self, j):
-        """Returns a copy of column j of the matrix, as an (m x 1) sparse
-        matrix (column vector).
-        """
-        # Spmatrix subclasses should override this method for efficiency.
-        # Post-multiply by a (n x 1) column vector 'a' containing all zeros
-        # except for a_j = 1
-        from csc import csc_matrix
-        n = self.shape[1]
-        a = csc_matrix((n, 1), dtype=self.dtype)
-        a[j, 0] = 1
-        return self * a
-
-    def getrow(self, i):
-        """Returns a copy of row i of the matrix, as a (1 x n) sparse
-        matrix (row vector).
-        """
-        # Spmatrix subclasses should override this method for efficiency.
-        # Pre-multiply by a (1 x m) row vector 'a' containing all zeros
-        # except for a_i = 1
-        from csr import csr_matrix
-        m = self.shape[0]
-        a = csr_matrix((1, m), dtype=self.dtype)
-        a[0, i] = 1
-        return a * self
-
-    #def __array__(self):
-    #    return self.toarray()
-
-    def todense(self):
-        return np.asmatrix(self.toarray())
-
-    def toarray(self):
-        return self.tocoo().toarray()
-
-    def todok(self):
-        return self.tocoo().todok()
-
-    def tocoo(self):
-        return self.tocsr().tocoo()
-
-    def tolil(self):
-        return self.tocsr().tolil()
-
-    def todia(self):
-        return self.tocoo().todia()
-
-    def tobsr(self, blocksize=None):
-        return self.tocsr().tobsr(blocksize=blocksize)
-
-    def copy(self):
-        return self.__class__(self,copy=True)
-
-    def sum(self, axis=None):
-        """Sum the matrix over the given axis.  If the axis is None, sum
-        over both rows and columns, returning a scalar.
-        """
-        # We use multiplication by an array of ones to achieve this.
-        # For some sparse matrix formats more efficient methods are
-        # possible -- these should override this function.
-        m, n = self.shape
-        if axis == 0:
-            # sum over columns
-            return np.asmatrix(np.ones((1, m), dtype=self.dtype)) * self
-        elif axis == 1:
-            # sum over rows
-            return self * np.asmatrix(np.ones((n, 1), dtype=self.dtype))
-        elif axis is None:
-            # sum over rows and columns
-            return ( self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) ).sum()
-        else:
-            raise ValueError, "axis out of bounds"
-
-    def mean(self, axis=None):
-        """Average the matrix over the given axis.  If the axis is None,
-        average over both rows and columns, returning a scalar.
-        """
-        if axis == 0:
-            mean = self.sum(0)
-            mean *= 1.0 / self.shape[0]
-            return mean
-        elif axis == 1:
-            mean = self.sum(1)
-            mean *= 1.0 / self.shape[1]
-            return mean
-        elif axis is None:
-            return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
-        else:
-            raise ValueError, "axis out of bounds"
-
-    def diagonal(self):
-        """Returns the main diagonal of the matrix
-        """
-        #TODO support k != 0
-        return self.tocsr().diagonal()
-
-    def setdiag(self, values, k=0):
-        """Fills the diagonal elements {a_ii} with the values from the
-        given sequence.  If k != 0, fills the off-diagonal elements
-        {a_{i,i+k}} instead.
-
-        values may have any length.  If the diagonal is longer than values,
-        then the remaining diagonal entries will not be set.  If values if
-        longer than the diagonal, then the remaining values are ignored.
-        """
-        M, N = self.shape
-        if (k > 0 and k >= N) or (k < 0 and -k >= M):
-            raise ValueError, "k exceedes matrix dimensions"
-        if k < 0:
-            max_index = min(M+k, N, len(values))
-            for i,v in enumerate(values[:max_index]):
-                self[i - k, i] = v
-        else:
-            max_index = min(M, N-k, len(values))
-            for i,v in enumerate(values[:max_index]):
-                self[i, i + k] = v
-
-    def save(self, file_name, format = '%d %d %f\n'):
-        #deprecated on Dec 14 2007
-        #remove after 0.7 release
-        warn('save() is deprecated, consider using mmwrite() or savemat()' \
-                ' provided by scipy.io instead',
-                DeprecationWarning)
-        try:
-            fd = open(file_name, 'w')
-        except Exception, e:
-            raise e, file_name
-
-        fd.write('%d %d\n' % self.shape)
-        fd.write('%d\n' % self.size)
-        for ii in xrange(self.size):
-            ir, ic = self.rowcol(ii)
-            data = self.getdata(ii)
-            fd.write(format % (ir, ic, data))
-        fd.close()
-
-
-from sputils import _isinstance
-
-def isspmatrix(x):
-    return _isinstance(x, spmatrix)
-
-issparse = isspmatrix
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/compressed.py b/cobra/oven/danielhyduke/jython/scipy/sparse/compressed.py
deleted file mode 100644
index 93ed506..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/compressed.py
+++ /dev/null
@@ -1,708 +0,0 @@
-"""Base class for sparse matrix formats using compressed storage
-"""
-
-__all__ = []
-
-from warnings import warn
-
-import numpy as np
-
-from base import spmatrix, isspmatrix, SparseEfficiencyWarning
-from data import _data_matrix
-import sparsetools
-from sputils import upcast, to_native, isdense, isshape, getdtype, \
-        isscalarlike, isintlike
-
-
-class _cs_matrix(_data_matrix):
-    """base matrix class for compressed row and column oriented matrices"""
-
-    def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None, nzmax=None):
-        _data_matrix.__init__(self)
-
-        if dims is not None:
-            warn("dims= is deprecated, use shape= instead", DeprecationWarning)
-            shape=dims
-
-        if nzmax is not None:
-            warn("nzmax= is deprecated", DeprecationWarning)
-
-
-        if isspmatrix(arg1):
-            if arg1.format == self.format and copy:
-                arg1 = arg1.copy()
-            else:
-                arg1 = arg1.asformat(self.format)
-            self._set_self( arg1 )
-
-        elif isinstance(arg1, tuple):
-            if isshape(arg1):
-                # It's a tuple of matrix dimensions (M, N)
-                # create empty matrix
-                self.shape = arg1   #spmatrix checks for errors here
-                M, N = self.shape
-                self.data    = np.zeros(0, getdtype(dtype, default=float))
-                self.indices = np.zeros(0, np.intc)
-                self.indptr  = np.zeros(self._swap((M,N))[0] + 1, dtype=np.intc)
-            else:
-                if len(arg1) == 2:
-                    # (data, ij) format
-                    from coo import coo_matrix
-                    other = self.__class__( coo_matrix(arg1, shape=shape) )
-                    self._set_self( other )
-                elif len(arg1) == 3:
-                    # (data, indices, indptr) format
-                    (data, indices, indptr) = arg1
-                    self.indices = np.array(indices, copy=copy)
-                    self.indptr  = np.array(indptr, copy=copy)
-                    self.data    = np.array(data, copy=copy, dtype=getdtype(dtype, data))
-                else:
-                    raise ValueError, "unrecognized %s_matrix constructor usage" %\
-                            self.format
-
-        else:
-            #must be dense
-            try:
-                arg1 = np.asarray(arg1)
-            except:
-                raise ValueError, "unrecognized %s_matrix constructor usage" % \
-                        self.format
-            from coo import coo_matrix
-            self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) )
-
-        # Read matrix dimensions given, if any
-        if shape is not None:
-            self.shape = shape   # spmatrix will check for errors
-        else:
-            if self.shape is None:
-                # shape not already set, try to infer dimensions
-                try:
-                    major_dim = len(self.indptr) - 1
-                    minor_dim = self.indices.max() + 1
-                except:
-                    raise ValueError,'unable to infer matrix dimensions'
-                else:
-                    self.shape = self._swap((major_dim,minor_dim))
-
-        if dtype is not None:
-            self.data = self.data.astype(dtype)
-
-        self.check_format(full_check=False)
-
-    def getnnz(self):
-        return self.indptr[-1]
-    nnz = property(fget=getnnz)
-
-
-    def _set_self(self, other, copy=False):
-        """take the member variables of other and assign them to self"""
-
-        if copy:
-            other = other.copy()
-
-        self.data    = other.data
-        self.indices = other.indices
-        self.indptr  = other.indptr
-        self.shape   = other.shape
-
-    def check_format(self, full_check=True):
-        """check whether the matrix format is valid
-
-        Parameters
-        ==========
-
-            - full_check : {bool}
-                - True  - rigorous check, O(N) operations : default
-                - False - basic check, O(1) operations
-
-        """
-        #use _swap to determine proper bounds
-        major_name,minor_name = self._swap(('row','column'))
-        major_dim,minor_dim = self._swap(self.shape)
-
-        # index arrays should have integer data types
-        if self.indptr.dtype.kind != 'i':
-            warn("indptr array has non-integer dtype (%s)" \
-                    % self.indptr.dtype.name )
-        if self.indices.dtype.kind != 'i':
-            warn("indices array has non-integer dtype (%s)" \
-                    % self.indices.dtype.name )
-
-        # only support 32-bit ints for now
-        self.indptr  = np.asarray(self.indptr,  dtype=np.intc)
-        self.indices = np.asarray(self.indices, dtype=np.intc)
-        self.data    = to_native(self.data)
-
-        # check array shapes
-        if np.rank(self.data) != 1 or np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
-            raise ValueError('data, indices, and indptr should be rank 1')
-
-        # check index pointer
-        if (len(self.indptr) != major_dim + 1 ):
-            raise ValueError, \
-                "index pointer size (%d) should be (%d)" % \
-                 (len(self.indptr), major_dim + 1)
-        if (self.indptr[0] != 0):
-            raise ValueError,"index pointer should start with 0"
-
-        # check index and data arrays
-        if (len(self.indices) != len(self.data)):
-            raise ValueError,"indices and data should have the same size"
-        if (self.indptr[-1] > len(self.indices)):
-            raise ValueError, \
-                  "Last value of index pointer should be less than "\
-                  "the size of index and data arrays"
-
-        self.prune()
-
-        if full_check:
-            #check format validity (more expensive)
-            if self.nnz > 0:
-                if self.indices.max() >= minor_dim:
-                    raise ValueError, "%s index values must be < %d" % \
-                            (minor_name,minor_dim)
-                if self.indices.min() < 0:
-                    raise ValueError, "%s index values must be >= 0" % \
-                            minor_name
-                if np.diff(self.indptr).min() < 0:
-                    raise ValueError,'index pointer values must form a " \
-                                        "non-decreasing sequence'
-
-        #if not self.has_sorted_indices():
-        #    warn('Indices were not in sorted order.  Sorting indices.')
-        #    self.sort_indices()
-        #    assert(self.has_sorted_indices())
-        #TODO check for duplicates?
-
-
-    def __add__(self,other):
-        # First check if argument is a scalar
-        if isscalarlike(other):
-            # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a CSC or CSR ' \
-                  'matrix is not supported'
-        elif isspmatrix(other):
-            if (other.shape != self.shape):
-                raise ValueError, "inconsistent shapes"
-
-            return self._binopt(other,'_plus_')
-        elif isdense(other):
-            # Convert this matrix to a dense matrix and add them
-            return self.todense() + other
-        else:
-            raise NotImplementedError
-
-    def __radd__(self,other):
-        return self.__add__(other)
-
-    def __sub__(self,other):
-        # First check if argument is a scalar
-        if isscalarlike(other):
-            # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a sparse ' \
-                  'matrix is not supported'
-        elif isspmatrix(other):
-            if (other.shape != self.shape):
-                raise ValueError, "inconsistent shapes"
-
-            return self._binopt(other,'_minus_')
-        elif isdense(other):
-            # Convert this matrix to a dense matrix and subtract them
-            return self.todense() - other
-        else:
-            raise NotImplementedError
-
-    def __rsub__(self,other):  # other - self
-        #note: this can't be replaced by other + (-self) for unsigned types
-        if isscalarlike(other):
-            # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a sparse ' \
-                  'matrix is not supported'
-        elif isdense(other):
-            # Convert this matrix to a dense matrix and subtract them
-            return other - self.todense()
-        else:
-            raise NotImplementedError
-
-
-    def __truediv__(self,other):
-        if isscalarlike(other):
-            return self * (1./other)
-
-        elif isspmatrix(other):
-            if other.shape != self.shape:
-                raise ValueError('inconsistent shapes')
-
-            return self._binopt(other,'_eldiv_')
-
-        else:
-            raise NotImplementedError
-
-
-    def multiply(self, other):
-        """Point-wise multiplication by another matrix
-        """
-        if other.shape != self.shape:
-            raise ValueError('inconsistent shapes')
-
-        if isdense(other):
-            return np.multiply(self.todense(),other)
-        else:
-            other = self.__class__(other)
-            return self._binopt(other,'_elmul_')
-
-
-    ###########################
-    # Multiplication handlers #
-    ###########################
-
-    def _mul_vector(self, other):
-        M,N = self.shape
-
-        #output array
-        result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) )
-
-        # csr_matvec or csc_matvec
-        fn = getattr(sparsetools,self.format + '_matvec')
-        fn(M, N, self.indptr, self.indices, self.data, other, result)
-
-        return result
-
-
-    def _mul_multivector(self, other):
-        M,N = self.shape
-        n_vecs = other.shape[1] #number of column vectors
-
-        result = np.zeros( (M,n_vecs), dtype=upcast(self.dtype,other.dtype) )
-
-        # csr_matvecs or csc_matvecs
-        fn = getattr(sparsetools,self.format + '_matvecs')
-        fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
-
-        return result
-
-
-    def _mul_sparse_matrix(self, other):
-        M, K1 = self.shape
-        K2, N = other.shape
-
-        major_axis = self._swap((M,N))[0]
-        indptr = np.empty(major_axis + 1, dtype=np.intc)
-
-        other = self.__class__(other) #convert to this format
-        fn = getattr(sparsetools, self.format + '_matmat_pass1')
-        fn( M, N, self.indptr, self.indices, \
-                  other.indptr, other.indices, \
-                  indptr)
-
-        nnz = indptr[-1]
-        indices = np.empty(nnz, dtype=np.intc)
-        data    = np.empty(nnz, dtype=upcast(self.dtype,other.dtype))
-
-        fn = getattr(sparsetools, self.format + '_matmat_pass2')
-        fn( M, N, self.indptr, self.indices, self.data, \
-                  other.indptr, other.indices, other.data, \
-                  indptr, indices, data)
-
-        return self.__class__((data,indices,indptr),shape=(M,N))
-
-
-    @np.deprecate
-    def getdata(self, ind):
-        return self.data[ind]
-
-    def diagonal(self):
-        """Returns the main diagonal of the matrix
-        """
-        #TODO support k-th diagonal
-        fn = getattr(sparsetools, self.format + "_diagonal")
-        y = np.empty( min(self.shape), dtype=upcast(self.dtype) )
-        fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y)
-        return y
-
-    def sum(self, axis=None):
-        """Sum the matrix over the given axis.  If the axis is None, sum
-        over both rows and columns, returning a scalar.
-        """
-        # The spmatrix base class already does axis=0 and axis=1 efficiently
-        # so we only do the case axis=None here
-        if axis is None:
-            return self.data.sum()
-        else:
-            return spmatrix.sum(self,axis)
-            raise ValueError, "axis out of bounds"
-
-    #######################
-    # Getting and Setting #
-    #######################
-
-    def __getitem__(self, key):
-        if isinstance(key, tuple):
-            row = key[0]
-            col = key[1]
-
-            #TODO implement CSR[ [1,2,3], X ] with sparse matmat
-            #TODO make use of sorted indices
-
-            if isintlike(row) and isintlike(col):
-                return self._get_single_element(row,col)
-            else:
-                major,minor = self._swap((row,col))
-                if isintlike(major) and isinstance(minor,slice):
-                    minor_shape = self._swap(self.shape)[1]
-                    start, stop, stride = minor.indices(minor_shape)
-                    out_shape   = self._swap( (1, stop-start) )
-                    return self._get_slice( major, start, stop, stride, out_shape)
-                elif isinstance( row, slice) or isinstance(col, slice):
-                    return self._get_submatrix( row, col )
-                else:
-                    raise NotImplementedError
-
-        elif isintlike(key):
-            return self[key, :]
-        else:
-            raise IndexError, "invalid index"
-
-
-    def _get_single_element(self,row,col):
-        M, N = self.shape
-        if (row < 0):
-            row += M
-        if (col < 0):
-            col += N
-        if not (0<=row<M) or not (0<=col<N):
-            raise IndexError("index out of bounds")
-
-        major_index, minor_index = self._swap((row,col))
-
-        start = self.indptr[major_index]
-        end   = self.indptr[major_index+1]
-        indxs = np.where(minor_index == self.indices[start:end])[0]
-
-        num_matches = len(indxs)
-
-        if num_matches == 0:
-            # entry does not appear in the matrix
-            return 0
-        elif num_matches == 1:
-            return self.data[start:end][indxs[0]]
-        else:
-            raise ValueError('nonzero entry (%d,%d) occurs more than once' % (row,col))
-
-    def _get_slice(self, i, start, stop, stride, shape):
-        """Returns a copy of the elements
-            [i, start:stop:string] for row-oriented matrices
-            [start:stop:string, i] for column-oriented matrices
-        """
-        if stride != 1:
-            raise ValueError, "slicing with step != 1 not supported"
-        if stop <= start:
-            raise ValueError, "slice width must be >= 1"
-
-        #TODO make [i,:] faster
-        #TODO implement [i,x:y:z]
-
-        indices = []
-
-        for ind in xrange(self.indptr[i], self.indptr[i+1]):
-            if self.indices[ind] >= start and self.indices[ind] < stop:
-                indices.append(ind)
-
-        index  = self.indices[indices] - start
-        data   = self.data[indices]
-        indptr = np.array([0, len(indices)])
-        return self.__class__((data, index, indptr), shape=shape, \
-                              dtype=self.dtype)
-
-    def _get_submatrix( self, slice0, slice1 ):
-        """Return a submatrix of this matrix (new matrix is created)."""
-
-        slice0, slice1 = self._swap((slice0,slice1))
-        shape0, shape1 = self._swap(self.shape)
-        def _process_slice( sl, num ):
-            if isinstance( sl, slice ):
-                i0, i1 = sl.start, sl.stop
-                if i0 is None:
-                    i0 = 0
-                elif i0 < 0:
-                    i0 = num + i0
-
-                if i1 is None:
-                    i1 = num
-                elif i1 < 0:
-                    i1 = num + i1
-
-                return i0, i1
-
-            elif np.isscalar( sl ):
-                if sl < 0:
-                    sl += num
-
-                return sl, sl + 1
-
-            else:
-                return sl[0], sl[1]
-
-        def _in_bounds( i0, i1, num ):
-            if not (0<=i0<num) or not (0<i1<=num) or not (i0<i1):
-                raise IndexError,\
-                      "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %\
-                      (i0, num, i1, num, i0, i1)
-
-        i0, i1 = _process_slice( slice0, shape0 )
-        j0, j1 = _process_slice( slice1, shape1 )
-        _in_bounds( i0, i1, shape0 )
-        _in_bounds( j0, j1, shape1 )
-
-        aux = sparsetools.get_csr_submatrix( shape0, shape1,
-                                             self.indptr, self.indices,
-                                             self.data,
-                                             i0, i1, j0, j1 )
-
-        data, indices, indptr = aux[2], aux[1], aux[0]
-        shape = self._swap( (i1 - i0, j1 - j0) )
-
-        return self.__class__( (data,indices,indptr), shape=shape )
-
-
-    def __setitem__(self, key, val):
-        if isinstance(key, tuple):
-            row,col = key
-            if not (isscalarlike(row) and isscalarlike(col)):
-                raise NotImplementedError("Fancy indexing in assignment not "
-                                          "supported for csr matrices.")
-            M, N = self.shape
-            if (row < 0):
-                row += M
-            if (col < 0):
-                col += N
-            if not (0<=row<M) or not (0<=col<N):
-                raise IndexError, "index out of bounds"
-
-            major_index, minor_index = self._swap((row,col))
-
-            start = self.indptr[major_index]
-            end   = self.indptr[major_index+1]
-            indxs = np.where(minor_index == self.indices[start:end])[0]
-
-            num_matches = len(indxs)
-
-
-            if not np.isscalar(val):
-                raise ValueError('setting an array element with a sequence')
-
-            val = self.dtype.type(val)
-
-            if num_matches == 0:
-                #entry not already present
-                warn('changing the sparsity structure of a %s_matrix is expensive. ' \
-                        'lil_matrix is more efficient.' % self.format, \
-                        SparseEfficiencyWarning)
-
-                if self.has_sorted_indices:
-                    # preserve sorted order
-                    newindx = start + self.indices[start:end].searchsorted(minor_index)
-                else:
-                    newindx = start
-
-                val         = np.array([val],         dtype=self.data.dtype)
-                minor_index = np.array([minor_index], dtype=self.indices.dtype)
-
-                self.data    = np.concatenate((self.data[:newindx],    val,         self.data[newindx:]))
-                self.indices = np.concatenate((self.indices[:newindx], minor_index, self.indices[newindx:]))
-                self.indptr  = self.indptr.copy()
-
-                self.indptr[major_index+1:] += 1
-
-            elif num_matches == 1:
-                #entry appears exactly once
-                self.data[start:end][indxs[0]] = val
-            else:
-                #entry appears more than once
-                raise ValueError,'nonzero entry (%d,%d) occurs more than once' % (row,col)
-
-            self.check_format(full_check=True)
-        else:
-            # We should allow slices here!
-            raise IndexError, "invalid index"
-
-    ######################
-    # Conversion methods #
-    ######################
-
-    def todia(self):
-        return self.tocoo(copy=False).todia()
-
-    def todok(self):
-        return self.tocoo(copy=False).todok()
-
-    def tocoo(self,copy=True):
-        """Return a COOrdinate representation of this matrix
-
-        When copy=False the index and data arrays are not copied.
-        """
-        major_dim,minor_dim = self._swap(self.shape)
-
-        data = self.data
-        minor_indices = self.indices
-
-        if copy:
-            data = data.copy()
-            minor_indices = minor_indices.copy()
-
-        major_indices = np.empty(len(minor_indices), dtype=np.intc)
-
-        sparsetools.expandptr(major_dim,self.indptr,major_indices)
-
-        row,col = self._swap((major_indices,minor_indices))
-
-        from coo import coo_matrix
-        return coo_matrix((data,(row,col)), self.shape)
-
-    def toarray(self):
-        return self.tocoo(copy=False).toarray()
-
-    ##############################################################
-    # methods that examine or modify the internal data structure #
-    ##############################################################
-
-    def eliminate_zeros(self):
-        """Remove zero entries from the matrix
-
-        The is an *in place* operation
-        """
-        fn = sparsetools.csr_eliminate_zeros
-        M,N = self._swap(self.shape)
-        fn( M, N, self.indptr, self.indices, self.data)
-
-        self.prune() #nnz may have changed
-
-    def sum_duplicates(self):
-        """Eliminate duplicate matrix entries by adding them together
-
-        The is an *in place* operation
-        """
-        self.sort_indices()
-
-        fn = sparsetools.csr_sum_duplicates
-        M,N = self._swap(self.shape)
-        fn( M, N, self.indptr, self.indices, self.data)
-
-        self.prune() #nnz may have changed
-
-
-    def __get_sorted(self):
-        """Determine whether the matrix has sorted indices
-
-        Returns
-            - True: if the indices of the matrix are in sorted order
-            - False: otherwise
-
-        """
-
-        #first check to see if result was cached
-        if not hasattr(self,'__has_sorted_indices'):
-            fn = sparsetools.csr_has_sorted_indices
-            self.__has_sorted_indices = \
-                    fn( len(self.indptr) - 1, self.indptr, self.indices)
-        return self.__has_sorted_indices
-
-    def __set_sorted(self, val):
-        self.__has_sorted_indices = bool(val)
-
-    has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
-
-    def sorted_indices(self):
-        """Return a copy of this matrix with sorted indices
-        """
-        A = self.copy()
-        A.sort_indices()
-        return A
-
-        # an alternative that has linear complexity is the following
-        # although the previous option is typically faster
-        #return self.toother().toother()
-
-    def sort_indices(self):
-        """Sort the indices of this matrix *in place*
-        """
-
-        if not self.has_sorted_indices:
-            fn = sparsetools.csr_sort_indices
-            fn( len(self.indptr) - 1, self.indptr, self.indices, self.data)
-            self.has_sorted_indices = True
-
-    #TODO remove after 0.7
-    def ensure_sorted_indices(self, inplace=False):
-        """Return a copy of this matrix where the column indices are sorted
-        """
-        warn('ensure_sorted_indices is deprecated, ' \
-                'use sorted_indices() or sort_indices() instead', \
-                DeprecationWarning)
-
-        if inplace:
-            self.sort_indices()
-        else:
-            return self.sorted_indices()
-
-    def prune(self):
-        """Remove empty space after all non-zero elements.
-        """
-        major_dim = self._swap(self.shape)[0]
-
-        if len(self.indptr) != major_dim + 1:
-            raise ValueError('index pointer has invalid length')
-        if len(self.indices) < self.nnz:
-            raise ValueError('indices array has fewer than nnz elements')
-        if len(self.data) < self.nnz:
-            raise ValueError('data array has fewer than nnz elements')
-
-        self.data    = self.data[:self.nnz]
-        self.indices = self.indices[:self.nnz]
-
-
-    ###################
-    # utility methods #
-    ###################
-
-    # needed by _data_matrix
-    def _with_data(self,data,copy=True):
-        """Returns a matrix with the same sparsity structure as self,
-        but with different data.  By default the structure arrays
-        (i.e. .indptr and .indices) are copied.
-        """
-        if copy:
-            return self.__class__((data,self.indices.copy(),self.indptr.copy()), \
-                                   shape=self.shape,dtype=data.dtype)
-        else:
-            return self.__class__((data,self.indices,self.indptr), \
-                                   shape=self.shape,dtype=data.dtype)
-
-    def _binopt(self, other, op):
-        """apply the binary operation fn to two sparse matrices"""
-        other = self.__class__(other)
-
-        # e.g. csr_plus_csr, csr_minus_csr, etc.
-        fn = getattr(sparsetools, self.format + op + self.format)
-
-        maxnnz  = self.nnz + other.nnz
-        indptr  = np.empty_like(self.indptr)
-        indices = np.empty(maxnnz, dtype=np.intc)
-        data    = np.empty(maxnnz, dtype=upcast(self.dtype,other.dtype))
-
-        fn(self.shape[0], self.shape[1], \
-                self.indptr,  self.indices,  self.data,
-                other.indptr, other.indices, other.data,
-                indptr, indices, data)
-
-        actual_nnz = indptr[-1]
-        indices = indices[:actual_nnz]
-        data    = data[:actual_nnz]
-        if actual_nnz < maxnnz / 2:
-            #too much waste, trim arrays
-            indices = indices.copy()
-            data    = data.copy()
-
-        A = self.__class__((data, indices, indptr), shape=self.shape)
-
-        return A
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/construct.py b/cobra/oven/danielhyduke/jython/scipy/sparse/construct.py
deleted file mode 100644
index ef2f4ab..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/construct.py
+++ /dev/null
@@ -1,535 +0,0 @@
-"""Functions to construct sparse matrices
-"""
-
-__docformat__ = "restructuredtext en"
-
-__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',
-            'hstack', 'vstack', 'bmat', 'rand']
-
-
-from warnings import warn
-
-import numpy as np
-
-from sputils import upcast
-
-from csr import csr_matrix
-from csc import csc_matrix
-from bsr import bsr_matrix
-from coo import coo_matrix
-from lil import lil_matrix
-from dia import dia_matrix
-
-def spdiags(data, diags, m, n, format=None):
-    """
-    Return a sparse matrix from diagonals.
-
-    Parameters
-    ----------
-    data   : array_like
-        matrix diagonals stored row-wise
-    diags  : diagonals to set
-        - k = 0  the main diagonal
-        - k > 0  the k-th upper diagonal
-        - k < 0  the k-th lower diagonal
-    m, n : int
-        shape of the result
-    format : format of the result (e.g. "csr")
-        By default (format=None) an appropriate sparse matrix
-        format is returned.  This choice is subject to change.
-
-    See Also
-    --------
-    dia_matrix : the sparse DIAgonal format.
-
-    Examples
-    --------
-    >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
-    >>> diags = array([0,-1,2])
-    >>> spdiags(data, diags, 4, 4).todense()
-    matrix([[1, 0, 3, 0],
-            [1, 2, 0, 4],
-            [0, 2, 3, 0],
-            [0, 0, 3, 4]])
-
-    """
-    return dia_matrix((data, diags), shape=(m,n)).asformat(format)
-
-def identity(n, dtype='d', format=None):
-    """Identity matrix in sparse format
-
-    Returns an identity matrix with shape (n,n) using a given
-    sparse format and dtype.
-
-    Parameters
-    ----------
-    n : integer
-        Shape of the identity matrix.
-    dtype :
-        Data type of the matrix
-    format : string
-        Sparse format of the result, e.g. format="csr", etc.
-
-    Examples
-    --------
-    >>> identity(3).todense()
-    matrix([[ 1.,  0.,  0.],
-            [ 0.,  1.,  0.],
-            [ 0.,  0.,  1.]])
-    >>> identity(3, dtype='int8', format='dia')
-    <3x3 sparse matrix of type '<type 'numpy.int8'>'
-            with 3 stored elements (1 diagonals) in DIAgonal format>
-
-    """
-
-    if format in ['csr','csc']:
-        indptr  = np.arange(n+1, dtype=np.intc)
-        indices = np.arange(n,   dtype=np.intc)
-        data    = np.ones(n,     dtype=dtype)
-        cls = eval('%s_matrix' % format)
-        return cls((data,indices,indptr),(n,n))
-    elif format == 'coo':
-        row  = np.arange(n, dtype=np.intc)
-        col  = np.arange(n, dtype=np.intc)
-        data = np.ones(n, dtype=dtype)
-        return coo_matrix((data,(row,col)),(n,n))
-    elif format == 'dia':
-        data = np.ones(n, dtype=dtype)
-        diags = [0]
-        return dia_matrix((data,diags), shape=(n,n))
-    else:
-        return identity(n, dtype=dtype, format='csr').asformat(format)
-
-
-def eye(m, n, k=0, dtype='d', format=None):
-    """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
-    is all ones and everything else is zeros.
-    """
-    m,n = int(m),int(n)
-    diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
-    return spdiags(diags, k, m, n).asformat(format)
-
-
-def kron(A, B, format=None):
-    """kronecker product of sparse matrices A and B
-
-    Parameters
-    ----------
-    A : sparse or dense matrix
-        first matrix of the product
-    B : sparse or dense matrix
-        second matrix of the product
-    format : string
-        format of the result (e.g. "csr")
-
-    Returns
-    -------
-    kronecker product in a sparse matrix format
-
-
-    Examples
-    --------
-    >>> A = csr_matrix(array([[0,2],[5,0]]))
-    >>> B = csr_matrix(array([[1,2],[3,4]]))
-    >>> kron(A,B).todense()
-    matrix([[ 0,  0,  2,  4],
-            [ 0,  0,  6,  8],
-            [ 5, 10,  0,  0],
-            [15, 20,  0,  0]])
-
-    >>> kron(A,[[1,2],[3,4]]).todense()
-    matrix([[ 0,  0,  2,  4],
-            [ 0,  0,  6,  8],
-            [ 5, 10,  0,  0],
-            [15, 20,  0,  0]])
-
-    """
-    B = coo_matrix(B)
-
-    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
-        #B is fairly dense, use BSR
-        A = csr_matrix(A,copy=True)
-
-        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
-
-        if A.nnz == 0 or B.nnz == 0:
-            # kronecker product is the zero matrix
-            return coo_matrix( output_shape )
-
-        B = B.toarray()
-        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
-        data = data * B
-
-        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
-    else:
-        #use COO
-        A = coo_matrix(A)
-        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
-
-        if A.nnz == 0 or B.nnz == 0:
-            # kronecker product is the zero matrix
-            return coo_matrix( output_shape )
-
-        # expand entries of a into blocks
-        row  = A.row.repeat(B.nnz)
-        col  = A.col.repeat(B.nnz)
-        data = A.data.repeat(B.nnz)
-
-        row *= B.shape[0]
-        col *= B.shape[1]
-
-        # increment block indices
-        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
-        row += B.row
-        col += B.col
-        row,col = row.reshape(-1),col.reshape(-1)
-
-        # compute block entries
-        data = data.reshape(-1,B.nnz) * B.data
-        data = data.reshape(-1)
-
-        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
-
-def kronsum(A, B, format=None):
-    """kronecker sum of sparse matrices A and B
-
-    Kronecker sum of two sparse matrices is a sum of two Kronecker
-    products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
-    and B has shape (n,n) and I_m and I_n are identity matrices
-    of shape (m,m) and (n,n) respectively.
-
-    Parameters
-    ----------
-    A
-        square matrix
-    B
-        square matrix
-    format : string
-        format of the result (e.g. "csr")
-
-    Returns
-    -------
-    kronecker sum in a sparse matrix format
-
-    Examples
-    --------
-
-
-    """
-    A = coo_matrix(A)
-    B = coo_matrix(B)
-
-    if A.shape[0] != A.shape[1]:
-        raise ValueError('A is not square')
-
-    if B.shape[0] != B.shape[1]:
-        raise ValueError('B is not square')
-
-    dtype = upcast(A.dtype, B.dtype)
-
-    L = kron(identity(B.shape[0],dtype=dtype), A, format=format)
-    R = kron(B, identity(A.shape[0],dtype=dtype), format=format)
-
-    return (L+R).asformat(format) #since L + R is not always same format
-
-
-def hstack(blocks, format=None, dtype=None):
-    """
-    Stack sparse matrices horizontally (column wise)
-
-    Parameters
-    ----------
-    blocks
-        sequence of sparse matrices with compatible shapes
-    format : string
-        sparse format of the result (e.g. "csr")
-        by default an appropriate sparse matrix format is returned.
-        This choice is subject to change.
-
-    See Also
-    --------
-    vstack : stack sparse matrices vertically (row wise)
-
-    Examples
-    --------
-    >>> from scipy.sparse import coo_matrix, vstack
-    >>> A = coo_matrix([[1,2],[3,4]])
-    >>> B = coo_matrix([[5],[6]])
-    >>> hstack( [A,B] ).todense()
-    matrix([[1, 2, 5],
-            [3, 4, 6]])
-
-    """
-    return bmat([blocks], format=format, dtype=dtype)
-
-def vstack(blocks, format=None, dtype=None):
-    """
-    Stack sparse matrices vertically (row wise)
-
-    Parameters
-    ----------
-    blocks
-        sequence of sparse matrices with compatible shapes
-    format : string
-        sparse format of the result (e.g. "csr")
-        by default an appropriate sparse matrix format is returned.
-        This choice is subject to change.
-
-    See Also
-    --------
-    hstack : stack sparse matrices horizontally (column wise)
-
-    Examples
-    --------
-    >>> from scipy.sparse import coo_matrix, vstack
-    >>> A = coo_matrix([[1,2],[3,4]])
-    >>> B = coo_matrix([[5,6]])
-    >>> vstack( [A,B] ).todense()
-    matrix([[1, 2],
-            [3, 4],
-            [5, 6]])
-
-    """
-    return bmat([ [b] for b in blocks ], format=format, dtype=dtype)
-
-def bmat(blocks, format=None, dtype=None):
-    """
-    Build a sparse matrix from sparse sub-blocks
-
-    Parameters
-    ----------
-    blocks
-        grid of sparse matrices with compatible shapes
-        an entry of None implies an all-zero matrix
-    format : sparse format of the result (e.g. "csr")
-        by default an appropriate sparse matrix format is returned.
-        This choice is subject to change.
-
-    Examples
-    --------
-    >>> from scipy.sparse import coo_matrix, bmat
-    >>> A = coo_matrix([[1,2],[3,4]])
-    >>> B = coo_matrix([[5],[6]])
-    >>> C = coo_matrix([[7]])
-    >>> bmat( [[A,B],[None,C]] ).todense()
-    matrix([[1, 2, 5],
-            [3, 4, 6],
-            [0, 0, 7]])
-
-    >>> bmat( [[A,None],[None,C]] ).todense()
-    matrix([[1, 2, 0],
-            [3, 4, 0],
-            [0, 0, 7]])
-
-    """
-
-    blocks = np.asarray(blocks, dtype='object')
-
-    if np.rank(blocks) != 2:
-        raise ValueError('blocks must have rank 2')
-
-    M,N = blocks.shape
-
-    block_mask   = np.zeros(blocks.shape,    dtype=np.bool)
-    brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)
-    bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)
-
-    # convert everything to COO format
-    for i in range(M):
-        for j in range(N):
-            if blocks[i,j] is not None:
-                A = coo_matrix(blocks[i,j])
-                blocks[i,j] = A
-                block_mask[i,j] = True
-
-                if brow_lengths[i] == 0:
-                    brow_lengths[i] = A.shape[0]
-                else:
-                    if brow_lengths[i] != A.shape[0]:
-                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
-
-                if bcol_lengths[j] == 0:
-                    bcol_lengths[j] = A.shape[1]
-                else:
-                    if bcol_lengths[j] != A.shape[1]:
-                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
-
-
-    # ensure that at least one value in each row and col is not None
-    if brow_lengths.min() == 0:
-        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )
-    if bcol_lengths.min() == 0:
-        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )
-
-    nnz = sum([ A.nnz for A in blocks[block_mask] ])
-    if dtype is None:
-        dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )
-
-    row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
-    col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
-
-    data = np.empty(nnz, dtype=dtype)
-    row  = np.empty(nnz, dtype=np.intc)
-    col  = np.empty(nnz, dtype=np.intc)
-
-    nnz = 0
-    for i in range(M):
-        for j in range(N):
-            if blocks[i,j] is not None:
-                A = blocks[i,j]
-                data[nnz:nnz + A.nnz] = A.data
-                row[nnz:nnz + A.nnz]  = A.row
-                col[nnz:nnz + A.nnz]  = A.col
-
-                row[nnz:nnz + A.nnz] += row_offsets[i]
-                col[nnz:nnz + A.nnz] += col_offsets[j]
-
-                nnz += A.nnz
-
-    shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
-    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
-
-def rand(m, n, density=0.01, format="coo", dtype=None):
-    """Generate a sparse matrix of the given shape and density with uniformely
-    distributed values.
-
-    Parameters
-    ----------
-    m, n: int
-        shape of the matrix
-    density: real
-        density of the generated matrix: density equal to one means a full
-        matrix, density of 0 means a matrix with no non-zero items.
-    format: str
-        sparse matrix format.
-    dtype: dtype
-        type of the returned matrix values.
-
-    Notes
-    -----
-    Only float types are supported for now.
-    """
-    if density < 0 or density > 1:
-        raise ValueError("density expected to be 0 <= density <= 1")
-    if dtype and not dtype in [np.float32, np.float64, np.longdouble]:
-        raise NotImplementedError("type %s not supported" % dtype)
-
-    mn = m * n
-
-    # XXX: sparse uses intc instead of intp...
-    tp = np.intp
-    if mn > np.iinfo(tp).max:
-        msg = """\
-Trying to generate a random sparse matrix such as the product of dimensions is
-greater than %d - this is not supported on this machine
-"""
-        raise ValueError(msg % np.iinfo(tp).max)
-
-    # Number of non zero values
-    k = long(density * m * n)
-
-    # Generate a few more values than k so that we can get unique values
-    # afterwards.
-    # XXX: one could be smarter here
-    mlow = 5
-    fac = 1.02
-    gk = min(k + mlow, fac * k)
-
-    def _gen_unique_rand(_gk):
-        id = np.random.rand(_gk)
-        return np.unique(np.floor(id * mn))[:k]
-
-    id = _gen_unique_rand(gk)
-    while id.size < k:
-        gk *= 1.05
-        id = _gen_unique_rand(gk)
-
-    j = np.floor(id * 1. / m).astype(tp)
-    i = (id - j * m).astype(tp)
-    vals = np.random.rand(k).astype(dtype)
-    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
-
-#################################
-# Deprecated functions
-################################
-
-__all__ += [ 'speye','spidentity', 'spkron', 'lil_eye', 'lil_diags' ]
-
-spkron = np.deprecate(kron, old_name='spkron', new_name='scipy.sparse.kron')
-speye = np.deprecate(eye, old_name='speye', new_name='scipy.sparse.eye')
-spidentity = np.deprecate(identity, old_name='spidentity',
-                                    new_name='scipy.sparse.identity')
-
-
-def lil_eye((r,c), k=0, dtype='d'):
-    """Generate a lil_matrix of dimensions (r,c) with the k-th
-    diagonal set to 1.
-
-    Parameters
-    ----------
-
-    r,c : int
-        row and column-dimensions of the output.
-    k : int
-        - diagonal offset.  In the output matrix,
-        - out[m,m+k] == 1 for all m.
-    dtype : dtype
-        data-type of the output array.
-
-    """
-    warn("lil_eye is deprecated." \
-            "use scipy.sparse.eye(r, c, k, format='lil') instead", \
-            DeprecationWarning)
-    return eye(r, c, k, dtype=dtype, format='lil')
-
-
-#TODO remove this function
-def lil_diags(diags, offsets, (m,n), dtype='d'):
-    """
-    Generate a lil_matrix with the given diagonals.
-
-    Parameters
-    ----------
-    diags : list of list of values e.g. [[1,2,3],[4,5]]
-        values to be placed on each indicated diagonal.
-    offsets : list of ints
-        diagonal offsets.  This indicates the diagonal on which
-        the given values should be placed.
-    (r,c) : tuple of ints
-        row and column dimensions of the output.
-    dtype : dtype
-        output data-type.
-
-    Examples
-    --------
-
-    >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense()
-    matrix([[ 1.,  4.,  6.],
-            [ 0.,  2.,  5.],
-            [ 0.,  0.,  3.]])
-
-    """
-    offsets_unsorted = list(offsets)
-    diags_unsorted = list(diags)
-    if len(diags) != len(offsets):
-        raise ValueError("Number of diagonals provided should "
-                         "agree with offsets.")
-
-    sort_indices = np.argsort(offsets_unsorted)
-    diags = [diags_unsorted[k] for k in sort_indices]
-    offsets = [offsets_unsorted[k] for k in sort_indices]
-
-    for i,k in enumerate(offsets):
-        if len(diags[i]) < m-abs(k):
-            raise ValueError("Not enough values specified to fill "
-                             "diagonal %s." % k)
-
-    out = lil_matrix((m,n),dtype=dtype)
-
-    from itertools import izip
-    for k,diag in izip(offsets,diags):
-        for ix,c in enumerate(xrange(np.clip(k,0,n),np.clip(m+k,0,n))):
-            out.rows[c-k].append(c)
-            out.data[c-k].append(diag[ix])
-    return out
-
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/csr.py b/cobra/oven/danielhyduke/jython/scipy/sparse/csr.py
deleted file mode 100644
index b152dc1..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/csr.py
+++ /dev/null
@@ -1,652 +0,0 @@
-# This file was automatically generated by SWIG (http://www.swig.org).
-# Version 1.3.36
-#
-# Don't modify this file, modify the SWIG interface instead.
-# This file is compatible with both classic and new-style classes.
-
-import _csr
-import new
-new_instancemethod = new.instancemethod
-try:
-    _swig_property = property
-except NameError:
-    pass # Python < 2.2 doesn't have 'property'.
-def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
-    if (name == "thisown"): return self.this.own(value)
-    if (name == "this"):
-        if type(value).__name__ == 'PySwigObject':
-            self.__dict__[name] = value
-            return
-    method = class_type.__swig_setmethods__.get(name,None)
-    if method: return method(self,value)
-    if (not static) or hasattr(self,name):
-        self.__dict__[name] = value
-    else:
-        raise AttributeError("You cannot add attributes to %s" % self)
-
-def _swig_setattr(self,class_type,name,value):
-    return _swig_setattr_nondynamic(self,class_type,name,value,0)
-
-def _swig_getattr(self,class_type,name):
-    if (name == "thisown"): return self.this.own()
-    method = class_type.__swig_getmethods__.get(name,None)
-    if method: return method(self)
-    raise AttributeError,name
-
-def _swig_repr(self):
-    try: strthis = "proxy of " + self.this.__repr__()
-    except: strthis = ""
-    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
-
-import types
-try:
-    _object = types.ObjectType
-    _newclass = 1
-except AttributeError:
-    class _object : pass
-    _newclass = 0
-del types
-
-
-
-def expandptr(*args):
-  """expandptr(int n_row, int Ap, int Bi)"""
-  return _csr.expandptr(*args)
-
-def csr_matmat_pass1(*args):
-  """
-    csr_matmat_pass1(int n_row, int n_col, int Ap, int Aj, int Bp, int Bj,
-        int Cp)
-    """
-  return _csr.csr_matmat_pass1(*args)
-
-def csr_count_blocks(*args):
-  """csr_count_blocks(int n_row, int n_col, int R, int C, int Ap, int Aj) -> int"""
-  return _csr.csr_count_blocks(*args)
-
-def csr_has_sorted_indices(*args):
-  """csr_has_sorted_indices(int n_row, int Ap, int Aj) -> bool"""
-  return _csr.csr_has_sorted_indices(*args)
-
-
-def csr_diagonal(*args):
-  """
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        signed char Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        unsigned char Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        unsigned short Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        unsigned int Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        long long Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        unsigned long long Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        long double Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        npy_cfloat_wrapper Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        npy_cdouble_wrapper Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        npy_clongdouble_wrapper Yx)
-    """
-  return _csr.csr_diagonal(*args)
-
-def csr_scale_rows(*args):
-  """
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        signed char Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        unsigned char Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        unsigned short Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        unsigned int Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        long long Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        unsigned long long Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        long double Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        npy_cfloat_wrapper Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        npy_cdouble_wrapper Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        npy_clongdouble_wrapper Xx)
-    """
-  return _csr.csr_scale_rows(*args)
-
-def csr_scale_columns(*args):
-  """
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        signed char Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        unsigned char Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        unsigned short Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        unsigned int Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        long long Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        unsigned long long Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        long double Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        npy_cfloat_wrapper Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        npy_cdouble_wrapper Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        npy_clongdouble_wrapper Xx)
-    """
-  return _csr.csr_scale_columns(*args)
-
-def csr_tocsc(*args):
-  """
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bi, signed char Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bi, unsigned char Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bi, short Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bi, unsigned short Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bi, int Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bi, unsigned int Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bi, long long Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bi, unsigned long long Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bi, float Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bi, double Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bi, long double Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bi, npy_cfloat_wrapper Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bi, npy_cdouble_wrapper Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bi, npy_clongdouble_wrapper Bx)
-    """
-  return _csr.csr_tocsc(*args)
-
-def csr_tobsr(*args):
-  """
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        signed char Ax, int Bp, int Bj, signed char Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        short Ax, int Bp, int Bj, short Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        int Ax, int Bp, int Bj, int Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        long long Ax, int Bp, int Bj, long long Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        float Ax, int Bp, int Bj, float Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        double Ax, int Bp, int Bj, double Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        long double Ax, int Bp, int Bj, long double Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
-        npy_clongdouble_wrapper Ax, int Bp, int Bj,
-        npy_clongdouble_wrapper Bx)
-    """
-  return _csr.csr_tobsr(*args)
-
-def csr_matmat_pass2(*args):
-  """
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bj, signed char Bx, int Cp, int Cj,
-        signed char Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bj, unsigned char Bx, int Cp,
-        int Cj, unsigned char Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bj, unsigned short Bx, int Cp,
-        int Cj, unsigned short Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bj, unsigned int Bx, int Cp,
-        int Cj, unsigned int Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bj, long long Bx, int Cp, int Cj,
-        long long Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bj, unsigned long long Bx,
-        int Cp, int Cj, unsigned long long Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bj, long double Bx, int Cp, int Cj,
-        long double Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bj, npy_cfloat_wrapper Bx,
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bj, npy_cdouble_wrapper Bx,
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bj, npy_clongdouble_wrapper Bx,
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
-    """
-  return _csr.csr_matmat_pass2(*args)
-
-def csr_matvec(*args):
-  """
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        signed char Xx, signed char Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        unsigned char Xx, unsigned char Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx,
-        short Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        unsigned short Xx, unsigned short Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx,
-        int Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        unsigned int Xx, unsigned int Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        long long Xx, long long Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        unsigned long long Xx, unsigned long long Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx,
-        float Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx,
-        double Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        long double Xx, long double Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
-    """
-  return _csr.csr_matvec(*args)
-
-def csr_matvecs(*args):
-  """
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, signed char Ax,
-        signed char Xx, signed char Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned char Ax,
-        unsigned char Xx, unsigned char Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, short Ax,
-        short Xx, short Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned short Ax,
-        unsigned short Xx, unsigned short Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, int Ax,
-        int Xx, int Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned int Ax,
-        unsigned int Xx, unsigned int Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, long long Ax,
-        long long Xx, long long Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, unsigned long long Ax,
-        unsigned long long Xx,
-        unsigned long long Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, float Ax,
-        float Xx, float Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, double Ax,
-        double Xx, double Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, long double Ax,
-        long double Xx, long double Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        npy_cfloat_wrapper Xx,
-        npy_cfloat_wrapper Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        npy_cdouble_wrapper Xx,
-        npy_cdouble_wrapper Yx)
-    csr_matvecs(int n_row, int n_col, int n_vecs, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        npy_clongdouble_wrapper Xx,
-        npy_clongdouble_wrapper Yx)
-    """
-  return _csr.csr_matvecs(*args)
-
-def csr_elmul_csr(*args):
-  """
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bj, signed char Bx, int Cp, int Cj,
-        signed char Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bj, unsigned char Bx, int Cp,
-        int Cj, unsigned char Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bj, unsigned short Bx, int Cp,
-        int Cj, unsigned short Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bj, unsigned int Bx, int Cp,
-        int Cj, unsigned int Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bj, long long Bx, int Cp, int Cj,
-        long long Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bj, unsigned long long Bx,
-        int Cp, int Cj, unsigned long long Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bj, long double Bx, int Cp, int Cj,
-        long double Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bj, npy_cfloat_wrapper Bx,
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bj, npy_cdouble_wrapper Bx,
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bj, npy_clongdouble_wrapper Bx,
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
-    """
-  return _csr.csr_elmul_csr(*args)
-
-def csr_eldiv_csr(*args):
-  """
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bj, signed char Bx, int Cp, int Cj,
-        signed char Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bj, unsigned char Bx, int Cp,
-        int Cj, unsigned char Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bj, unsigned short Bx, int Cp,
-        int Cj, unsigned short Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bj, unsigned int Bx, int Cp,
-        int Cj, unsigned int Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bj, long long Bx, int Cp, int Cj,
-        long long Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bj, unsigned long long Bx,
-        int Cp, int Cj, unsigned long long Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bj, long double Bx, int Cp, int Cj,
-        long double Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bj, npy_cfloat_wrapper Bx,
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bj, npy_cdouble_wrapper Bx,
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bj, npy_clongdouble_wrapper Bx,
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
-    """
-  return _csr.csr_eldiv_csr(*args)
-
-def csr_plus_csr(*args):
-  """
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bj, signed char Bx, int Cp, int Cj,
-        signed char Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bj, unsigned char Bx, int Cp,
-        int Cj, unsigned char Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bj, unsigned short Bx, int Cp,
-        int Cj, unsigned short Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bj, unsigned int Bx, int Cp,
-        int Cj, unsigned int Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bj, long long Bx, int Cp, int Cj,
-        long long Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bj, unsigned long long Bx,
-        int Cp, int Cj, unsigned long long Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bj, long double Bx, int Cp, int Cj,
-        long double Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bj, npy_cfloat_wrapper Bx,
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bj, npy_cdouble_wrapper Bx,
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bj, npy_clongdouble_wrapper Bx,
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
-    """
-  return _csr.csr_plus_csr(*args)
-
-def csr_minus_csr(*args):
-  """
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int Bp, int Bj, signed char Bx, int Cp, int Cj,
-        signed char Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int Bp, int Bj, unsigned char Bx, int Cp,
-        int Cj, unsigned char Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int Bp, int Bj, unsigned short Bx, int Cp,
-        int Cj, unsigned short Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int Bp, int Bj, unsigned int Bx, int Cp,
-        int Cj, unsigned int Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int Bp, int Bj, long long Bx, int Cp, int Cj,
-        long long Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int Bp, int Bj, unsigned long long Bx,
-        int Cp, int Cj, unsigned long long Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int Bp, int Bj, long double Bx, int Cp, int Cj,
-        long double Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int Bp, int Bj, npy_cfloat_wrapper Bx,
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int Bp, int Bj, npy_cdouble_wrapper Bx,
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int Bp, int Bj, npy_clongdouble_wrapper Bx,
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
-    """
-  return _csr.csr_minus_csr(*args)
-
-def csr_sort_indices(*args):
-  """
-    csr_sort_indices(int n_row, int Ap, int Aj, signed char Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned char Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, short Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned short Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, int Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned int Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, long long Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned long long Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, float Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, double Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, long double Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_clongdouble_wrapper Ax)
-    """
-  return _csr.csr_sort_indices(*args)
-
-def csr_eliminate_zeros(*args):
-  """
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, signed char Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, short Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, int Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long long Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, float Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, double Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long double Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
-    """
-  return _csr.csr_eliminate_zeros(*args)
-
-def csr_sum_duplicates(*args):
-  """
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, signed char Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, short Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, int Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long long Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, float Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, double Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long double Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
-    """
-  return _csr.csr_sum_duplicates(*args)
-
-def get_csr_submatrix(*args):
-  """
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(signed char)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(unsigned char)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, short Ax, int ir0,
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(short)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(unsigned short)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, int Ax, int ir0,
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(int)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(unsigned int)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(long long)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int ir0, int ir1, int ic0, int ic1,
-        std::vector<(int)> Bp, std::vector<(int)> Bj,
-        std::vector<(unsigned long long)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, float Ax, int ir0,
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(float)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, double Ax, int ir0,
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(double)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
-        std::vector<(int)> Bj, std::vector<(long double)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int ir0, int ir1, int ic0, int ic1,
-        std::vector<(int)> Bp, std::vector<(int)> Bj,
-        std::vector<(npy_cfloat_wrapper)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int ir0, int ir1, int ic0, int ic1,
-        std::vector<(int)> Bp, std::vector<(int)> Bj,
-        std::vector<(npy_cdouble_wrapper)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int ir0, int ir1, int ic0, int ic1,
-        std::vector<(int)> Bp, std::vector<(int)> Bj,
-        std::vector<(npy_clongdouble_wrapper)> Bx)
-    """
-  return _csr.get_csr_submatrix(*args)
-
-def csr_sample_values(*args):
-  """
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, signed char Ax,
-        int n_samples, int Bi, int Bj, signed char Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
-        int n_samples, int Bi, int Bj, unsigned char Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, short Ax, int n_samples,
-        int Bi, int Bj, short Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
-        int n_samples, int Bi, int Bj, unsigned short Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, int Ax, int n_samples,
-        int Bi, int Bj, int Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
-        int n_samples, int Bi, int Bj, unsigned int Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, long long Ax,
-        int n_samples, int Bi, int Bj, long long Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
-        int n_samples, int Bi, int Bj, unsigned long long Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, float Ax, int n_samples,
-        int Bi, int Bj, float Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, double Ax, int n_samples,
-        int Bi, int Bj, double Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, long double Ax,
-        int n_samples, int Bi, int Bj, long double Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
-        int n_samples, int Bi, int Bj, npy_cfloat_wrapper Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
-        int n_samples, int Bi, int Bj, npy_cdouble_wrapper Bx)
-    csr_sample_values(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
-        int n_samples, int Bi, int Bj,
-        npy_clongdouble_wrapper Bx)
-    """
-  return _csr.csr_sample_values(*args)
-
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/lil.py b/cobra/oven/danielhyduke/jython/scipy/sparse/lil.py
deleted file mode 100644
index 3214994..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/lil.py
+++ /dev/null
@@ -1,454 +0,0 @@
-"""LInked List sparse matrix class
-"""
-
-__docformat__ = "restructuredtext en"
-
-__all__ = ['lil_matrix','isspmatrix_lil']
-
-from bisect import bisect_left
-
-import numpy as np
-
-from base import spmatrix, isspmatrix
-from sputils import getdtype, isshape, issequence, isscalarlike
-
-class lil_matrix(spmatrix):
-    """Row-based linked list sparse matrix
-
-    This is an efficient structure for constructing sparse
-    matrices incrementally.
-
-    This can be instantiated in several ways:
-        lil_matrix(D)
-            with a dense matrix or rank-2 ndarray D
-
-        lil_matrix(S)
-            with another sparse matrix S (equivalent to S.tocsc())
-
-        lil_matrix((M, N), [dtype])
-            to construct an empty matrix with shape (M, N)
-            dtype is optional, defaulting to dtype='d'.
-
-    Notes
-    -----
-
-    Advantages of the LIL format
-        - supports flexible slicing
-        - changes to the matrix sparsity structure are efficient
-
-    Disadvantages of the LIL format
-        - arithmetic operations LIL + LIL are slow (consider CSR or CSC)
-        - slow column slicing (consider CSC)
-        - slow matrix vector products (consider CSR or CSC)
-
-    Intended Usage
-        - LIL is a convenient format for constructing sparse matrices
-        - once a matrix has been constructed, convert to CSR or
-          CSC format for fast arithmetic and matrix vector operations
-        - consider using the COO format when constructing large matrices
-
-    Data Structure
-        - An array (``self.rows``) of rows, each of which is a sorted
-          list of column indices of non-zero elements.
-        - The corresponding nonzero values are stored in similar
-          fashion in ``self.data``.
-
-
-    """
-
-    def __init__(self, arg1, shape=None, dtype=None, copy=False):
-        spmatrix.__init__(self)
-        self.dtype = getdtype(dtype, arg1, default=float)
-
-        # First get the shape
-        if isspmatrix(arg1):
-            if isspmatrix_lil(arg1) and copy:
-                A = arg1.copy()
-            else:
-                A = arg1.tolil()
-
-            if dtype is not None:
-                A = A.astype(dtype)
-
-            self.shape = A.shape
-            self.dtype = A.dtype
-            self.rows  = A.rows
-            self.data  = A.data
-        elif isinstance(arg1,tuple):
-            if isshape(arg1):
-                if shape is not None:
-                    raise ValueError('invalid use of shape parameter')
-                M, N = arg1
-                self.shape = (M,N)
-                self.rows = np.empty((M,), dtype=object)
-                self.data = np.empty((M,), dtype=object)
-                for i in range(M):
-                    self.rows[i] = []
-                    self.data[i] = []
-            else:
-                raise TypeError('unrecognized lil_matrix constructor usage')
-        else:
-            #assume A is dense
-            try:
-                A = np.asmatrix(arg1)
-            except TypeError:
-                raise TypeError('unsupported matrix type')
-            else:
-                from csr import csr_matrix
-                A = csr_matrix(A, dtype=dtype).tolil()
-
-                self.shape = A.shape
-                self.dtype = A.dtype
-                self.rows  = A.rows
-                self.data  = A.data
-
-    def __iadd__(self,other):
-        self[:,:] = self + other
-        return self
-
-    def __isub__(self,other):
-        self[:,:] = self - other
-        return self
-
-    def __imul__(self,other):
-        if isscalarlike(other):
-            self[:,:] = self * other
-            return self
-        else:
-            raise NotImplementedError
-
-    def __itruediv__(self,other):
-        if isscalarlike(other):
-            self[:,:] = self / other
-            return self
-        else:
-            raise NotImplementedError
-
-    # Whenever the dimensions change, empty lists should be created for each
-    # row
-
-    def getnnz(self):
-        return sum([len(rowvals) for rowvals in self.data])
-    nnz = property(fget=getnnz)
-
-    def __str__(self):
-        val = ''
-        for i, row in enumerate(self.rows):
-            for pos, j in enumerate(row):
-                val += "  %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
-        return val[:-1]
-
-    def getrowview(self, i):
-        """Returns a view of the 'i'th row (without copying).
-        """
-        new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
-        new.rows[0] = self.rows[i]
-        new.data[0] = self.data[i]
-        return new
-
-    def getrow(self, i):
-        """Returns a copy of the 'i'th row.
-        """
-        new = lil_matrix((1, self.shape[1]), dtype=self.dtype)
-        new.rows[0] = self.rows[i][:]
-        new.data[0] = self.data[i][:]
-        return new
-
-    def _get1(self, i, j):
-
-        if i < 0:
-            i += self.shape[0]
-        if i < 0 or i >= self.shape[0]:
-            raise IndexError('row index out of bounds')
-
-        if j < 0:
-            j += self.shape[1]
-        if j < 0 or j >= self.shape[1]:
-            raise IndexError('column index out of bounds')
-
-        row  = self.rows[i]
-        data = self.data[i]
-
-        pos = bisect_left(row, j)
-        if pos != len(data) and row[pos] == j:
-            return data[pos]
-        else:
-            return 0
-
-    def _slicetoseq(self, j, shape):
-        if j.start is not None and j.start < 0:
-            start =  shape + j.start
-        elif j.start is None:
-            start = 0
-        else:
-            start = j.start
-        if j.stop is not None and j.stop < 0:
-            stop = shape + j.stop
-        elif j.stop is None:
-            stop = shape
-        else:
-            stop = j.stop
-        j = range(start, stop, j.step or 1)
-        return j
-
-
-    def __getitem__(self, index):
-        """Return the element(s) index=(i, j), where j may be a slice.
-        This always returns a copy for consistency, since slices into
-        Python lists return copies.
-        """
-        try:
-            i, j = index
-        except (AssertionError, TypeError):
-            raise IndexError('invalid index')
-
-        if np.isscalar(i):
-            if np.isscalar(j):
-                return self._get1(i, j)
-            if isinstance(j, slice):
-                j = self._slicetoseq(j, self.shape[1])
-            if issequence(j):
-                return self.__class__([[self._get1(i, jj) for jj in j]])
-        elif issequence(i) and issequence(j):
-            return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])
-        elif issequence(i) or isinstance(i, slice):
-            if isinstance(i, slice):
-                i = self._slicetoseq(i, self.shape[0])
-            if np.isscalar(j):
-                return self.__class__([[self._get1(ii, j)] for ii in i])
-            if isinstance(j, slice):
-                j = self._slicetoseq(j, self.shape[1])
-            if issequence(j):
-                return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])
-        else:
-            raise IndexError
-
-    def _insertat2(self, row, data, j, x):
-        """ helper for __setitem__: insert a value in the given row/data at
-        column j. """
-
-        if j < 0: #handle negative column indices
-            j += self.shape[1]
-
-        if j < 0 or j >= self.shape[1]:
-            raise IndexError('column index out of bounds')
-
-        if not np.isscalar(x):
-            raise ValueError('setting an array element with a sequence')
-
-        try:
-            x = self.dtype.type(x)
-        except:
-            raise TypeError('Unable to convert value (%s) to dtype [%s]' % (x,self.dtype.name))
-
-        pos = bisect_left(row, j)
-        if x != 0:
-            if pos == len(row):
-                row.append(j)
-                data.append(x)
-            elif row[pos] != j:
-                row.insert(pos, j)
-                data.insert(pos, x)
-            else:
-                data[pos] = x
-        else:
-            if pos < len(row) and row[pos] == j:
-                del row[pos]
-                del data[pos]
-
-    def _setitem_setrow(self, row, data, j, xrow, xdata, xcols):
-        if isinstance(j, slice):
-            j = self._slicetoseq(j, self.shape[1])
-        if issequence(j):
-            if xcols == len(j):
-                for jj, xi in zip(j, xrange(xcols)):
-                   pos = bisect_left(xrow, xi)
-                   if pos != len(xdata) and xrow[pos] == xi:
-                       self._insertat2(row, data, jj, xdata[pos])
-                   else:
-                       self._insertat2(row, data, jj, 0)
-            elif xcols == 1:           # OK, broadcast across row
-                if len(xdata) > 0 and xrow[0] == 0:
-                    val = xdata[0]
-                else:
-                    val = 0
-                for jj in j:
-                    self._insertat2(row, data, jj,val)
-            else:
-                raise IndexError('invalid index')
-        elif np.isscalar(j):
-            if not xcols == 1:
-                raise ValueError('array dimensions are not compatible for copy')
-            if len(xdata) > 0 and xrow[0] == 0:
-                self._insertat2(row, data, j, xdata[0])
-            else:
-                self._insertat2(row, data, j, 0)
-        else:
-            raise ValueError('invalid column value: %s' % str(j))
-
-    def __setitem__(self, index, x):
-        try:
-            i, j = index
-        except (ValueError, TypeError):
-            raise IndexError('invalid index')
-
-        # shortcut for common case of single entry assign:
-        if np.isscalar(x) and np.isscalar(i) and np.isscalar(j):
-            self._insertat2(self.rows[i], self.data[i], j, x)
-            return
-
-        # shortcut for common case of full matrix assign:
-        if isspmatrix(x):
-          if isinstance(i, slice) and i == slice(None) and \
-             isinstance(j, slice) and j == slice(None):
-               x = lil_matrix(x)
-               self.rows = x.rows
-               self.data = x.data
-               return
-
-        if isinstance(i, tuple):       # can't index lists with tuple
-            i = list(i)
-
-        if np.isscalar(i):
-            rows = [self.rows[i]]
-            datas = [self.data[i]]
-        else:
-            rows = self.rows[i]
-            datas = self.data[i]
-
-        x = lil_matrix(x, copy=False)
-        xrows, xcols = x.shape
-        if xrows == len(rows):    # normal rectangular copy
-            for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data):
-                self._setitem_setrow(row, data, j, xrow, xdata, xcols)
-        elif xrows == 1:          # OK, broadcast down column
-            for row, data in zip(rows, datas):
-                self._setitem_setrow(row, data, j, x.rows[0], x.data[0], xcols)
-
-        # needed to pass 'test_lil_sequence_assignement' unit test:
-        # -- set row from column of entries --
-        elif xcols == len(rows):
-            x = x.T
-            for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data):
-                self._setitem_setrow(row, data, j, xrow, xdata, xrows)
-        else:
-            raise IndexError('invalid index')
-
-    def _mul_scalar(self, other):
-        if other == 0:
-            # Multiply by zero: return the zero matrix
-            new = lil_matrix(self.shape, dtype=self.dtype)
-        else:
-            new = self.copy()
-            # Multiply this scalar by every element.
-            new.data = np.array([[val*other for val in rowvals] for
-                                  rowvals in new.data], dtype=object)
-        return new
-
-    def __truediv__(self, other):           # self / other
-        if isscalarlike(other):
-            new = self.copy()
-            # Divide every element by this scalar
-            new.data = np.array([[val/other for val in rowvals] for
-                                  rowvals in new.data], dtype=object)
-            return new
-        else:
-            return self.tocsr() / other
-
-## This code doesn't work with complex matrices
-#    def multiply(self, other):
-#        """Point-wise multiplication by another lil_matrix.
-#
-#        """
-#        if np.isscalar(other):
-#            return self.__mul__(other)
-#
-#        if isspmatrix_lil(other):
-#            reference,target = self,other
-#
-#            if reference.shape != target.shape:
-#                raise ValueError("Dimensions do not match.")
-#
-#            if len(reference.data) > len(target.data):
-#                reference,target = target,reference
-#
-#            new = lil_matrix(reference.shape)
-#            for r,row in enumerate(reference.rows):
-#                tr = target.rows[r]
-#                td = target.data[r]
-#                rd = reference.data[r]
-#                L = len(tr)
-#                for c,column in enumerate(row):
-#                    ix = bisect_left(tr,column)
-#                    if ix < L and tr[ix] == column:
-#                        new.rows[r].append(column)
-#                        new.data[r].append(rd[c] * td[ix])
-#            return new
-#        else:
-#            raise ValueError("Point-wise multiplication only allowed "
-#                             "with another lil_matrix.")
-
-    def copy(self):
-        from copy import deepcopy
-        new = lil_matrix(self.shape, dtype=self.dtype)
-        new.data = deepcopy(self.data)
-        new.rows = deepcopy(self.rows)
-        return new
-
-    def reshape(self,shape):
-        new = lil_matrix(shape, dtype=self.dtype)
-        j_max = self.shape[1]
-        for i,row in enumerate(self.rows):
-            for col,j in enumerate(row):
-                new_r,new_c = np.unravel_index(i*j_max + j,shape)
-                new[new_r,new_c] = self[i,j]
-        return new
-
-    def toarray(self):
-        d = np.zeros(self.shape, dtype=self.dtype)
-        for i, row in enumerate(self.rows):
-            for pos, j in enumerate(row):
-                d[i, j] = self.data[i][pos]
-        return d
-
-    def transpose(self):
-        return self.tocsr().transpose().tolil()
-
-    def tolil(self, copy=False):
-        if copy:
-            return self.copy()
-        else:
-            return self
-
-    def tocsr(self):
-        """ Return Compressed Sparse Row format arrays for this matrix.
-        """
-
-        indptr = np.asarray([len(x) for x in self.rows], dtype=np.intc)
-        indptr = np.concatenate( (np.array([0], dtype=np.intc), np.cumsum(indptr)) )
-
-        nnz = indptr[-1]
-
-        indices = []
-        for x in self.rows:
-            indices.extend(x)
-        indices = np.asarray(indices, dtype=np.intc)
-
-        data = []
-        for x in self.data:
-            data.extend(x)
-        data = np.asarray(data, dtype=self.dtype)
-
-        from csr import csr_matrix
-        return csr_matrix((data, indices, indptr), shape=self.shape)
-
-    def tocsc(self):
-        """ Return Compressed Sparse Column format arrays for this matrix.
-        """
-        return self.tocsr().tocsc()
-
-
-from sputils import _isinstance
-
-def isspmatrix_lil( x ):
-    return _isinstance(x, lil_matrix)
diff --git a/cobra/oven/danielhyduke/jython/scipy/sparse/sputils.py b/cobra/oven/danielhyduke/jython/scipy/sparse/sputils.py
deleted file mode 100644
index 4e0edcf..0000000
--- a/cobra/oven/danielhyduke/jython/scipy/sparse/sputils.py
+++ /dev/null
@@ -1,133 +0,0 @@
-""" Utility functions for sparse matrix module
-"""
-
-__all__ = ['upcast','getdtype','isscalarlike','isintlike',
-            'isshape','issequence','isdense']
-
-import numjy as np
-
-# keep this list syncronized with sparsetools
-#supported_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
-#        'int64', 'uint64', 'float32', 'float64',
-#        'complex64', 'complex128']
-supported_dtypes = ['int8','uint8','short','ushort','intc','uintc',
-        'longlong','ulonglong','single','double','longdouble',
-        'csingle','cdouble','clongdouble']
-supported_dtypes = [ np.typeDict[x] for x in supported_dtypes]
-
-def upcast(*args):
-    """Returns the nearest supported sparse dtype for the
-    combination of one or more types.
-
-    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype
-
-    Examples
-    --------
-
-    >>> upcast('int32')
-    <type 'numpy.int32'>
-    >>> upcast('bool')
-    <type 'numpy.int8'>
-    >>> upcast('int32','float32')
-    <type 'numpy.float64'>
-    >>> upcast('bool',complex,float)
-    <type 'numpy.complex128'>
-
-    """
-    sample = np.array([0],dtype=args[0])
-    for t in args[1:]:
-        sample = sample + np.array([0],dtype=t)
-
-    upcast = sample.dtype
-
-    for t in supported_dtypes:
-        if np.can_cast(sample.dtype,t):
-            return t
-
-    raise TypeError,'no supported conversion for types: %s' % args
-
-
-def to_native(A):
-    return np.asarray(A,dtype=A.dtype.newbyteorder('native'))
-
-
-def getdtype(dtype, a=None, default=None):
-    """Function used to simplify argument processing.  If 'dtype' is not
-    specified (is None), returns a.dtype; otherwise returns a np.dtype
-    object created from the specified dtype argument.  If 'dtype' and 'a'
-    are both None, construct a data type out of the 'default' parameter.
-    Furthermore, 'dtype' must be in 'allowed' set.
-    """
-    #TODO is this really what we want?
-    canCast = True
-    if dtype is None:
-        try:
-            newdtype = a.dtype
-        except AttributeError:
-            if default is not None:
-                newdtype = np.dtype(default)
-                canCast = False
-            else:
-                raise TypeError, "could not interpret data type"
-    else:
-        newdtype = np.dtype(dtype)
-
-    return newdtype
-
-def isscalarlike(x):
-    """Is x either a scalar, an array scalar, or a 0-dim array?"""
-    return np.isscalar(x) or (isdense(x) and x.ndim == 0)
-
-def isintlike(x):
-    """Is x appropriate as an index into a sparse matrix? Returns True
-    if it can be cast safely to a machine int.
-    """
-    if issequence(x):
-        return False
-    else:
-        try:
-            if int(x) == x:
-                return True
-            else:
-                return False
-        except TypeError:
-            return False
-
-def isshape(x):
-    """Is x a valid 2-tuple of dimensions?
-    """
-    try:
-        # Assume it's a tuple of matrix dimensions (M, N)
-        (M, N) = x
-    except:
-        return False
-    else:
-        if isintlike(M) and isintlike(N):
-            if np.rank(M) == 0 and np.rank(N) == 0:
-                return True
-        return False
-
-
-def issequence(t):
-    #Modded for numjy
-    if isinstance(t, (list, tuple)):
-        return True
-    elif hassattr(t, '._M'):
-        return (isinstance(t._M, np.ndarray) and (t.ndim == 1))
-    else:
-        return False
-    
-
-def _isinstance(x, _class):
-    ##
-    # This makes scipy.sparse.sparse.csc_matrix == __main__.csc_matrix.
-    c1 = ('%s' % x.__class__).split( '.' )
-    c2 = ('%s' % _class).split( '.' )
-    aux = c1[-1] == c2[-1]
-    return isinstance(x, _class) or aux
-
-def isdense(x):
-    #Modded for numjy
-    if hasattr(x,'_M'):
-        return _isinstance(x._M, np.ndarray)
-    raise Exception('The matrix must be created by numjy')
diff --git a/cobra/oven/danielhyduke/query/__init__.py b/cobra/oven/danielhyduke/query/__init__.py
deleted file mode 100644
index e7bee6d..0000000
--- a/cobra/oven/danielhyduke/query/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from query import *
diff --git a/cobra/oven/danielhyduke/query/query.py b/cobra/oven/danielhyduke/query/query.py
deleted file mode 100644
index 353258b..0000000
--- a/cobra/oven/danielhyduke/query/query.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#cobra.query.query.py
-#Will serve as a location to house the growing number of
-#simple query functions attached to cobra.Model
-
-#NOTE: Many of the find functions are gone because Reactions,
-#Metabolites, and Genes are now away of each other.
-
-import re
-#####
-def print_reactions_involving_metabolite(cobra_model, the_metabolites):
-    """Update to allow for multiple metabolite search
-
-    cobra_model: A cobra.Model object
-
-    the_metabolites: A list of cobra.Metabolites or metabolite ids that are in
-    cobra_metabolites.
-
-    #TODO: Move this to the Metabolite class
-
-    """
-    if hasattr(the_metabolites, 'id'):
-        the_metabolites = [the_metabolites]
-    elif not hasattr(the_metabolites, '__iter__'):
-        the_metabolites = [the_metabolites]
-    if not hasattr(the_metabolites[0], 'id'):
-        the_metabolites = [cobra_model.metabolites[cobra_model.metabolites.index(x)]
-                           for x in the_metabolites]
-        
-    for the_metabolite in the_metabolties:
-        for the_reaction in the_metabolite._reaction:
-            print the_reaction.reaction
- 
-         
-def get_translation_reactions(cobra_model, genes_of_interest):
-    """Find the translation elongation reactions for a set of genes
-    in a cobra model.  Related to ME-model extensions
-
-    cobra_model:  A cobra.Model object.
-
-    genes_of_interest:  A list of genes from cobra_model.genes.
-    
-    """
-    gene_translation_reactions = defaultdict(list)
-    for the_reaction in cobra_model.reactions:
-        if 'translation_elongation' in the_reaction:
-            for the_gene in genes_of_interest:
-                if the_gene in the_reaction:
-                    gene_translation_reactions[the_gene].append(the_reaction)
-                    continue
-    return gene_translation_reactions
-
-
-if __name__ == '__main__':
-    from cPickle import load
-    from time import time
-    solver = 'glpk'
-    test_directory = '../test/data/'
-    with open(test_directory + 'salmonella.pickle') as in_file:
-        cobra_model = load(in_file)
-
-    #TODO: Add in tests for each function
-    print 'Need to add in tests for %s'%repr(['print_reactions_involving_metabolite'])
-                                              
diff --git a/cobra/solvers/__init__.py b/cobra/solvers/__init__.py
index e4a82b1..e2dd02d 100644
--- a/cobra/solvers/__init__.py
+++ b/cobra/solvers/__init__.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
 # Solvers are expected to follow the following interface
 # create_problem: makes a solver problem object from a cobra.model and
 # sets parameters (if possible)
@@ -21,8 +23,12 @@
 # This attempts to import all working solvers in this directory
 
 from __future__ import absolute_import
-from warnings import warn
+
+import logging
 from os import listdir, path
+from warnings import warn
+
+LOGGER = logging.getLogger(__name__)
 
 solver_dict = {}
 possible_solvers = set()
@@ -39,7 +45,7 @@ def add_solver(solver_name, use_name=None):
             use_name = solver_name
     solver_dict[use_name] = eval(solver_name)
 
-    
+
 for i in listdir(path.dirname(path.abspath(__file__))):
     if i.startswith("_") or i.startswith(".") or i.startswith('legacy'):
         continue
@@ -53,10 +59,14 @@ if "wrappers" in possible_solvers:
     possible_solvers.remove("wrappers")
 
 for solver in possible_solvers:
+    LOGGER.debug("adding '%s'...", solver)
     try:
         add_solver(solver)
-    except:
+    except Exception as err:
+        LOGGER.debug("addition failed: %s", str(err))
         pass
+    else:
+        LOGGER.debug("success!")
     del solver
 
 if len(solver_dict) == 0:
diff --git a/cobra/solvers/cglpk.pyx b/cobra/solvers/cglpk.pyx
index 0d4fd3b..80ca79f 100644
--- a/cobra/solvers/cglpk.pyx
+++ b/cobra/solvers/cglpk.pyx
@@ -6,13 +6,13 @@ from libc.stdlib cimport malloc, free
 from cpython cimport bool
 from cpython.version cimport PY_MAJOR_VERSION
 
+import sys
 from tempfile import NamedTemporaryFile as _NamedTemporaryFile  # for pickling
 from os import unlink as _unlink
-import sys
 from contextlib import contextmanager as _contextmanager
 from warnings import warn as _warn
 
-from six import StringIO
+from six import StringIO, iteritems
 try:
     from sympy import Basic, Number
 except:
@@ -20,6 +20,8 @@ except:
         pass
     Number = Basic
 
+from cobra.core.solution import LegacySolution
+
 __glpk_version__ = str(glp_version())
 _SUPPORTS_MILP = True
 solver_name = "cglpk"
@@ -208,7 +210,7 @@ cdef class GLP:
             else:
                 raise ValueError("unsupported bound type: %s" % c)
             glp_set_row_bnds(glp, index, bound_type, b, b)
-        
+
         # set reaction/varaiable bounds
         for index, reaction in enumerate(cobra_model.reactions, 1):
             if reaction.variable_kind == "integer":
@@ -226,12 +228,12 @@ cdef class GLP:
             glp_set_obj_coef(glp, index,
                              _to_double(reaction.objective_coefficient))
 
-            for metabolite, coefficient in reaction._metabolites.iteritems():
+            for metabolite, coefficient in iteritems(reaction.metabolites):
                 linear_constraint_rows.append(
                     metabolite_id_to_index[metabolite.id])
                 linear_constraint_cols.append(index)
                 linear_constraint_values.append(coefficient)
-        
+
         # set constraint marix
         # first copy the python lists to c arrays
         n_values = downcast_pos_size(len(linear_constraint_rows))
@@ -315,7 +317,7 @@ cdef class GLP:
         self.parameters.tm_lim = min(500, time_limit)
         fast_status = glp_simplex(glp, &self.parameters)
         self.parameters.tm_lim = time_limit
-        
+
         if fast_status != 0:
             with quiet(self.parameters.msg_lev):
                 glp_adv_basis(glp, 0)
@@ -448,11 +450,10 @@ cdef class GLP:
     def format_solution(self, cobra_model):
         cdef int i, m, n
         cdef glp_prob *glp = self.glp
-        Solution = cobra_model.solution.__class__
         status = self.get_status()
         if status != "optimal":  # todo handle other possible
-            return Solution(None, status=status)
-        solution = Solution(self.get_objective_value(), status=status)
+            return LegacySolution(None, status=status)
+        solution = LegacySolution(self.get_objective_value(), status=status)
         m = glp_get_num_rows(glp)
         n = glp_get_num_cols(glp)
         x = [0] * n
diff --git a/cobra/solvers/coin.py b/cobra/solvers/coin.py
index d4ca17c..c9ff9c3 100644
--- a/cobra/solvers/coin.py
+++ b/cobra/solvers/coin.py
@@ -1,6 +1,11 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from cobra.core.solution import LegacySolution
+
 from cylp.cy import CyClpSimplex
-from cylp.py.modeling.CyLPModel import CyLPArray
 from cylp.cy.CyCoinPackedMatrix import CyCoinPackedMatrix
+from cylp.py.modeling.CyLPModel import CyLPArray
 
 solver_name = "coin"
 _status_translation = {"primal infeasible": "infeasible",
@@ -90,11 +95,10 @@ def solve_problem(lp, **kwargs):
 
 
 def format_solution(lp, cobra_model):
-    Solution = cobra_model.solution.__class__
     status = get_status(lp)
     if status != "optimal":  # todo handle other possible
-        return Solution(None, status=status)
-    solution = Solution(lp.objectiveValue_, status=status)
+        return LegacySolution(None, status=status)
+    solution = LegacySolution(lp.objectiveValue_, status=status)
     x = lp.primalVariableSolution_["v"].tolist()
     solution.x_dict = {r.id: x[i] for i, r in enumerate(cobra_model.reactions)}
     solution.x = x
diff --git a/cobra/solvers/cplex_solver.py b/cobra/solvers/cplex_solver.py
index 9072842..5d60dc1 100644
--- a/cobra/solvers/cplex_solver.py
+++ b/cobra/solvers/cplex_solver.py
@@ -1,15 +1,18 @@
+# -*- coding: utf-8 -*-
+
 # Interface to ilog/cplex 12.4 python interface
 
-from copy import deepcopy
-from warnings import warn
+from __future__ import absolute_import
+
 import sys
+from warnings import warn
 
 from cplex import Cplex, SparsePair
 from cplex.exceptions import CplexError
-
-from ..core.Solution import Solution
+from six import iteritems, string_types
 from six.moves import zip
-from six import string_types, iteritems
+
+from cobra.core.solution import LegacySolution
 
 try:
     from sympy import Basic, Number
@@ -92,7 +95,7 @@ def format_solution(lp, cobra_model, **kwargs):
     else:
         x = y = x_dict = y_dict = objective_value = None
 
-    return Solution(objective_value, x=x, x_dict=x_dict, status=status,
+    return LegacySolution(objective_value, x=x, x_dict=x_dict, status=status,
                     y=y, y_dict=y_dict)
 
 
diff --git a/cobra/solvers/cplex_solver_java.py b/cobra/solvers/cplex_solver_java.py
index 56cd03c..59a75ab 100644
--- a/cobra/solvers/cplex_solver_java.py
+++ b/cobra/solvers/cplex_solver_java.py
@@ -1,25 +1,30 @@
+# -*- coding: utf-8 -*-
 # PLEASE NOTE THAT JYTHON SUPPORT (and this jython-only-solver) is deprecated
 #Interface to ilog/cplex 12.4 python / jython interfaces
 #QPs are not yet supported under jython
-from __future__ import print_function
-from os import name as __name
+from __future__ import absolute_import, print_function
+
 from copy import deepcopy
+from os import name as __name
+from time import time
 from warnings import warn
-###solver specific parameters
-from .parameters import status_dict, variable_kind_dict, \
-     sense_dict, parameter_mappings, parameter_defaults, \
-     objective_senses, default_objective_sense
 
-from ..core.Solution import Solution
-from time import time
 from six import iteritems
+
+from ilog.concert import IloNumVarType, IloObjectiveSense
+from ilog.cplex import IloCplex
+from ilog.cplex.IloCplex import DoubleParam, IntParam, StringParam
+
+from ..core.solution import Solution
+###solver specific parameters
+from .parameters import (
+    default_objective_sense, objective_senses, parameter_defaults,
+    parameter_mappings, sense_dict, status_dict, variable_kind_dict)
+
 solver_name = 'cplex'
 parameter_defaults = parameter_defaults[solver_name]
 sense_dict = eval(sense_dict[solver_name])
 
-from ilog.cplex import IloCplex
-from ilog.cplex.IloCplex import DoubleParam, IntParam, StringParam
-from ilog.concert import IloNumVarType, IloObjectiveSense 
 #__solver_class = IloCplex
 status_dict = eval(status_dict[solver_name])
 class Problem(IloCplex):
diff --git a/cobra/solvers/esolver.py b/cobra/solvers/esolver.py
index 4a46308..12a08c5 100644
--- a/cobra/solvers/esolver.py
+++ b/cobra/solvers/esolver.py
@@ -1,12 +1,18 @@
-from subprocess import check_output, check_call, CalledProcessError
-from os import unlink, devnull
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from fractions import Fraction
+from os import devnull, unlink
 from os.path import isfile
+from subprocess import CalledProcessError, check_call, check_output
 from tempfile import NamedTemporaryFile
-from fractions import Fraction
+
 from six.moves import zip
 
-from . import cglpk
-from .wrappers import *
+from cobra.core.solution import LegacySolution
+from cobra.solvers import cglpk
+from cobra.solvers.wrappers import *
 
 # detect paths to system calls for esolver and gzip
 with open(devnull, "w") as DEVNULL:
@@ -120,7 +126,7 @@ class Esolver(cglpk.GLP):
 
     def format_solution(self, cobra_model):
         m = cobra_model
-        solution = m.solution.__class__(None)
+        solution = LegacySolution(None)
         with open(self.solution_filepath) as infile:
             solution.status = infile.readline().split("=")[1].strip().lower()
             if solution.status != "optimal":
diff --git a/cobra/solvers/glpk_solver.py b/cobra/solvers/glpk_solver.py
index 7115d36..e30f08a 100644
--- a/cobra/solvers/glpk_solver.py
+++ b/cobra/solvers/glpk_solver.py
@@ -1,17 +1,24 @@
+# -*- coding: utf-8 -*-
 ##cobra.solvers.glpk_solver
 #This script provides wrappers for pyglpk 0.3
-from warnings import warn
+from __future__ import absolute_import
+
 from copy import deepcopy
+from warnings import warn
+
+from six import iteritems
+
+from glpk import LPX
+
+from cobra.core.solution import LegacySolution
+
 try:
     # Import izip for python versions < 3.x
     from itertools import izip as zip
 except ImportError:
     pass
 
-from six import iteritems
-from glpk import LPX
 
-from ..core.Solution import Solution
 
 solver_name = 'glpk'
 _SUPPORTS_MILP = True
@@ -41,7 +48,7 @@ def get_objective_value(lp):
 def format_solution(lp, cobra_model, **kwargs):
     status = get_status(lp)
     if status == 'optimal':
-        sol = Solution(lp.obj.value, status=status)
+        sol = LegacySolution(lp.obj.value, status=status)
         sol.x = [float(c.primal) for c in lp.cols]
         sol.x_dict = {c.name: c.primal for c in lp.cols}
 
@@ -51,7 +58,7 @@ def format_solution(lp, cobra_model, **kwargs):
             sol.y_dict = {c.name: c.dual for c in lp.rows}
         return sol
 
-    return Solution(None, status=status)
+    return LegacySolution(None, status=status)
 
 def set_parameter(lp, parameter_name, parameter_value):
     """with pyglpk the parameters are set during the solve phase, with
@@ -226,12 +233,12 @@ def solve(cobra_model, **kwargs):
     """Smart interface to optimization solver functions that will convert
     the cobra_model to a solver object, set the parameters, and try multiple
     methods to get an optimal solution before returning the solver object and
-    a cobra.Solution (which is attached to cobra_model.solution)
+    a cobra.LegacySolution
 
     cobra_model: a cobra.Model
 
     returns a dict: {'the_problem': solver specific object, 'the_solution':
-    cobra.Solution for the optimization problem'}
+    cobra.solution for the optimization problem'}
 
 
     """
diff --git a/cobra/solvers/glpk_solver_java.py b/cobra/solvers/glpk_solver_java.py
index 3fa9a19..6aec7d6 100644
--- a/cobra/solvers/glpk_solver_java.py
+++ b/cobra/solvers/glpk_solver_java.py
@@ -1,26 +1,31 @@
+# -*- coding: utf-8 -*-
 # PLEASE NOTE THAT JYTHON SUPPORT (and this jython-only-solver) is deprecated
 #This script provides wrappers for libglpk-java 1.0.22 and pyglpk 0.3
-from __future__ import print_function
-from warnings import warn
-from copy import deepcopy
-###solver specific parameters
-from .parameters import status_dict, variable_kind_dict, \
-     sense_dict, parameter_mappings, parameter_defaults, \
-     objective_senses, default_objective_sense
+from __future__ import absolute_import, print_function
 
-from ..core.Solution import Solution
+from copy import deepcopy
+from os import name
 from time import time
+from warnings import warn
+
 from six import iteritems
+
+from org.gnu.glpk import GLPK, GLPKConstants, glp_iocp, glp_smcp
+
+from ..core.solution import Solution
+###solver specific parameters
+from .parameters import (
+    default_objective_sense, objective_senses, parameter_defaults,
+    parameter_mappings, sense_dict, status_dict, variable_kind_dict)
+
 solver_name = 'glpk'
 sense_dict = eval(sense_dict[solver_name])
 #Functions that are different for java implementation of a solver
 
-from os import name
 if name != "java":
     raise Exception("jython only")
 
 warn("cobra.solvers.glpk_solver isn't mature.  consider using gurobi or cplex")
-from org.gnu.glpk import GLPK, GLPKConstants, glp_smcp, glp_iocp
 variable_kind_dict = eval(variable_kind_dict['%s_%s'%(solver_name,
                                             __name)])
 status_dict = eval(status_dict['%s_%s'%(solver_name,
@@ -308,12 +313,12 @@ def solve(cobra_model, **kwargs):
     """Smart interface to optimization solver functions that will convert
     the cobra_model to a solver object, set the parameters, and try multiple
     methods to get an optimal solution before returning the solver object and
-    a cobra.Solution (which is attached to cobra_model.solution)
+    a cobra.solution (which is attached to cobra_model.solution)
 
     cobra_model: a cobra.Model
 
     returns a dict: {'the_problem': solver specific object, 'the_solution':
-    cobra.Solution for the optimization problem'}
+    cobra.solution for the optimization problem'}
     
 
     """
diff --git a/cobra/solvers/gurobi_solver.py b/cobra/solvers/gurobi_solver.py
index c7a3972..0e6d478 100644
--- a/cobra/solvers/gurobi_solver.py
+++ b/cobra/solvers/gurobi_solver.py
@@ -1,8 +1,17 @@
+# -*- coding: utf-8 -*-
 # Interface to gurobipy
 
-from warnings import warn
-from multiprocessing import Process
+from __future__ import absolute_import
+
 import platform
+from multiprocessing import Process
+from warnings import warn
+
+from gurobipy import GRB, LinExpr, Model, QuadExpr
+from six import iteritems, string_types
+
+from ..core.solution import LegacySolution
+
 try:
     # Import izip for python versions < 3.x
     from itertools import izip as zip
@@ -27,11 +36,8 @@ if platform.system() != "Windows":
         raise RuntimeError("importing gurobi causes a crash (exitcode %d)" %
                            p.exitcode)
 
-from gurobipy import Model, LinExpr, GRB, QuadExpr
 
-from ..core.Solution import Solution
 
-from six import string_types, iteritems
 
 try:
     from sympy import Basic, Number
@@ -102,7 +108,7 @@ def get_objective_value(lp):
 def format_solution(lp, cobra_model, **kwargs):
     status = get_status(lp)
     if status not in ('optimal', 'time_limit'):
-        the_solution = Solution(None, status=status)
+        the_solution = LegacySolution(None, status=status)
     else:
         objective_value = lp.ObjVal
         x = [v.X for v in lp.getVars()]
@@ -113,7 +119,7 @@ def format_solution(lp, cobra_model, **kwargs):
             y = [c.Pi for c in lp.getConstrs()]
             y_dict = {m.id: value for m, value
                       in zip(cobra_model.metabolites, y)}
-        the_solution = Solution(objective_value, x=x, x_dict=x_dict, y=y,
+        the_solution = LegacySolution(objective_value, x=x, x_dict=x_dict, y=y,
                                 y_dict=y_dict, status=status)
     return(the_solution)
 
diff --git a/cobra/solvers/gurobi_solver_java.py b/cobra/solvers/gurobi_solver_java.py
index 6893f1f..1b91d59 100644
--- a/cobra/solvers/gurobi_solver_java.py
+++ b/cobra/solvers/gurobi_solver_java.py
@@ -1,18 +1,25 @@
+# -*- coding: utf-8 -*-
 # PLEASE NOTE THAT JYTHON SUPPORT (and this jython-only-solver) is deprecated
 #Interface to the gurobi 5.0.1 python and java solvers
 #QPs are not yet supported on java
-from __future__ import print_function
-from warnings import warn
-from os import name as __name
+from __future__ import absolute_import, print_function
+
 from copy import deepcopy
+from os import name as __name
+from time import time
+from warnings import warn
+
 from six import iteritems
+
+from gurobi import GRBQuadExpr as QuadExpr
+from gurobi import GRB, GRBEnv, GRBLinExpr, GRBModel
+
+from ..core.solution import Solution
 ###solver specific parameters
-from .parameters import status_dict, variable_kind_dict, \
-     sense_dict, parameter_mappings, parameter_defaults, \
-     objective_senses, default_objective_sense
+from .parameters import (
+    default_objective_sense, objective_senses, parameter_defaults,
+    parameter_mappings, sense_dict, status_dict, variable_kind_dict)
 
-from ..core.Solution import Solution
-from time import time
 solver_name = 'gurobi'
 objective_senses = objective_senses[solver_name]
 parameter_mappings = parameter_mappings[solver_name]
@@ -22,13 +29,9 @@ parameter_defaults = parameter_defaults[solver_name]
 ## def array(x, variable_type='d'):
 ##     return j_array(x, variable_type)
 
-from gurobi import GRB
 variable_kind_dict = eval(variable_kind_dict[solver_name])
 status_dict = eval(status_dict[solver_name])
 
-from gurobi import GRBModel, GRBEnv
-from gurobi import GRBLinExpr
-from gurobi import GRBQuadExpr as QuadExpr
 __solver_class = GRBModel
 #TODO: Create a pythonesqe class similar to in glpk_solver
 def Model(name=''):
diff --git a/cobra/solvers/mosek.py b/cobra/solvers/mosek.py
index 683b9d3..d19dcf0 100644
--- a/cobra/solvers/mosek.py
+++ b/cobra/solvers/mosek.py
@@ -1,9 +1,11 @@
+# -*- coding: utf-8 -*-
 from __future__ import absolute_import, print_function
 
 import mosek
-
-from six.moves import zip
 from six import iteritems, string_types
+from six.moves import zip
+
+from cobra.core.solution import LegacySolution
 
 env = mosek.Env()
 
@@ -199,8 +201,8 @@ def format_solution(lp, cobra_model):
     mosek_status = lp.getsolsta(soltype)
     status = status_dict.get(mosek_status, str(mosek_status))
     if status != "optimal":
-        return cobra_model.solution.__class__(None, status=status)
-    solution = cobra_model.solution.__class__(get_objective_value(lp))
+        return LegacySolution(None, status=status)
+    solution = LegacySolution(get_objective_value(lp))
     solution.status = status
     x = [0] * len(cobra_model.reactions)
     lp.getxx(soltype, x)
diff --git a/cobra/solvers/parameters.py b/cobra/solvers/parameters.py
index fd9be16..0a5e797 100644
--- a/cobra/solvers/parameters.py
+++ b/cobra/solvers/parameters.py
@@ -1,7 +1,11 @@
+# -*- coding: utf-8 -*-
 #This centralizes some of the common elements that are differently named across solvers.
 #These are stored as strings here to prevent problems associated with calling
 #solver objects for solver packages that aren't available
+from __future__ import absolute_import
+
 from copy import deepcopy
+
 __objective_sense_cplex = {'maximize': 'Cplex.objective.sense.maximize',
                            'minimize': 'Cplex.objective.sense.minimize'}
 __objective_sense_cplex_java = {'maximize': 'IloObjectiveSense.Maximize',
diff --git a/cobra/solvers/wrappers.py b/cobra/solvers/wrappers.py
index 24e027d..c8c0933 100644
--- a/cobra/solvers/wrappers.py
+++ b/cobra/solvers/wrappers.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
 """
 Wrappers for solvers with an object oriented interface. This creates
 functions to call the objects' functions.
@@ -8,6 +10,8 @@ create_problem = PROBLEM_CLASS.create_problem
 where PROBLEM_CLASS is the solver class (i.e. GLP, esolver, etc.)
 """
 
+from __future__ import absolute_import
+
 
 def set_objective_sense(lp, objective_sense="maximize"):
     return lp.set_objective_sense(lp, objective_sense=objective_sense)
diff --git a/cobra/test/__init__.py b/cobra/test/__init__.py
index 5e6d441..03f412c 100644
--- a/cobra/test/__init__.py
+++ b/cobra/test/__init__.py
@@ -1,5 +1,11 @@
-from os.path import join, abspath, dirname
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from os.path import abspath, dirname, join
+
 from cobra.io import read_sbml_model
+
 try:
     import pytest
     import pytest_benchmark
@@ -39,11 +45,15 @@ def create_test_model(model_name="salmonella"):
         return _load(infile)
 
 
-def test_all():
+def test_all(args=None):
     """ alias for running all unit-tests on installed cobra
     """
     if pytest:
+        args = args if args else []
+
         return pytest.main(
-            ['--pyargs', 'cobra', '--benchmark-skip', '-v', '-rs']) == 0
+            ['--pyargs', 'cobra', '--benchmark-skip', '-v', '-rs'] + args
+        )
     else:
-        raise ImportError('missing package pytest required for testing')
+        raise ImportError('missing package pytest and pytest_benchmark'
+                          ' required for testing')
diff --git a/cobra/test/conftest.py b/cobra/test/conftest.py
index e9bc9b9..3794ef5 100644
--- a/cobra/test/conftest.py
+++ b/cobra/test/conftest.py
@@ -1,11 +1,32 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import json
 from os.path import join
-from . import create_test_model, data_dir
+
 import pytest
+
+from cobra.test import create_test_model, data_dir
+
 try:
     from cPickle import load as _load
 except ImportError:
     from pickle import load as _load
-import json
+
+import cobra.util.solver as sutil
+from cobra.solvers import solver_dict
+from cobra import Model, Metabolite, Reaction
+
+
+def pytest_addoption(parser):
+    try:
+        parser.addoption("--run-slow", action="store_true",
+                         help="run slow tests")
+        parser.addoption("--run-non-deterministic", action="store_true",
+                         help="run tests that sometimes (rarely) fail")
+    except ValueError:
+        pass
 
 
 @pytest.fixture(scope="session")
@@ -13,22 +34,22 @@ def data_directory():
     return data_dir
 
 
- at pytest.fixture(scope="function")
-def model():
+ at pytest.fixture(scope="session")
+def small_model():
     return create_test_model("textbook")
 
 
 @pytest.fixture(scope="function")
-def large_model():
-    return create_test_model("ecoli")
+def model(small_model):
+    return small_model.copy()
 
 
 @pytest.fixture(scope="function")
-def array_model():
-    return create_test_model("textbook").to_array_based_model()
+def large_model():
+    return create_test_model("ecoli")
 
 
- at pytest.fixture(scope="function")
+ at pytest.fixture(scope="session")
 def salmonella():
     return create_test_model("salmonella")
 
@@ -38,11 +59,64 @@ def solved_model(data_directory):
     model = create_test_model("textbook")
     with open(join(data_directory, "textbook_solution.pickle"),
               "rb") as infile:
-        model.solution = _load(infile)
-    return model
+        solution = _load(infile)
+    return solution, model
 
 
- at pytest.fixture(scope="function")
+ at pytest.fixture(scope="session")
+def tiny_toy_model():
+    tiny = Model("Toy Model")
+    m1 = Metabolite("M1")
+    d1 = Reaction("ex1")
+    d1.add_metabolites({m1: -1})
+    d1.upper_bound = 0
+    d1.lower_bound = -1000
+    tiny.add_reactions([d1])
+    tiny.objective = 'ex1'
+    return tiny
+
+
+ at pytest.fixture(scope="session")
 def fva_results(data_directory):
     with open(join(data_directory, "textbook_fva.json"), "r") as infile:
         return json.load(infile)
+
+
+ at pytest.fixture(scope="session")
+def pfba_fva_results(data_directory):
+    with open(join(data_directory, "textbook_pfba_fva.json"), "r") as infile:
+        return json.load(infile)
+
+
+stable_optlang = ["glpk", "cplex", "gurobi"]
+optlang_solvers = ["optlang-" + s for s in stable_optlang if s in
+                   sutil.solvers]
+all_solvers = optlang_solvers + list(solver_dict)
+
+
+ at pytest.fixture(params=optlang_solvers, scope="session")
+def opt_solver(request):
+    return request.param
+
+
+ at pytest.fixture(params=list(solver_dict), scope="session")
+def legacy_solver(request):
+    return request.param
+
+
+ at pytest.fixture(scope="function")
+def metabolites(model, request):
+    if request.param == "exchange":
+        return [
+            met for met in model.metabolites if
+            met.compartment == 'e' and "EX_" + met.id not in model.reactions]
+    elif request.param == "demand":
+        return [
+            met for met in model.metabolites if
+            met.compartment == 'c' and "DM_" + met.id not in model.reactions]
+    elif request.param == "sink":
+        return [
+            met for met in model.metabolites if
+            met.compartment == 'c' and "SK_" + met.id not in model.reactions]
+    else:
+        raise ValueError("unknown metabolites {}".format(request.param))
diff --git a/cobra/test/data/iJO1366.pickle b/cobra/test/data/iJO1366.pickle
index 0178494..6346508 100644
Binary files a/cobra/test/data/iJO1366.pickle and b/cobra/test/data/iJO1366.pickle differ
diff --git a/cobra/test/data/mini.json b/cobra/test/data/mini.json
index a6234f9..51d719f 100644
--- a/cobra/test/data/mini.json
+++ b/cobra/test/data/mini.json
@@ -1107,7 +1107,7 @@
                 "pi_c": 1.0
             },
             "name": "ATP maintenance requirement",
-            "objective_coefficient": 1,
+            "objective_coefficient": 1.0,
             "upper_bound": 1000.0
         },
         {
@@ -1268,6 +1268,7 @@
                 "h_c": 1.0
             },
             "name": "phosphofructokinase",
+            "objective_coefficient": 1.0,
             "upper_bound": 1000.0
         },
         {
diff --git a/cobra/test/data/mini.mat b/cobra/test/data/mini.mat
index 5c3e857..6726342 100644
Binary files a/cobra/test/data/mini.mat and b/cobra/test/data/mini.mat differ
diff --git a/cobra/test/data/mini.pickle b/cobra/test/data/mini.pickle
index 922c455..eb9d561 100644
Binary files a/cobra/test/data/mini.pickle and b/cobra/test/data/mini.pickle differ
diff --git a/cobra/test/data/mini.yml b/cobra/test/data/mini.yml
new file mode 100644
index 0000000..88327a3
--- /dev/null
+++ b/cobra/test/data/mini.yml
@@ -0,0 +1,1148 @@
+!!omap
+- reactions:
+  - !!omap
+    - id: ATPM
+    - name: ATP maintenance requirement
+    - metabolites: !!omap
+      - adp_c: 1.0
+      - atp_c: -1.0
+      - h_c: 1.0
+      - h2o_c: -1.0
+      - pi_c: 1.0
+    - lower_bound: 8.39
+    - upper_bound: 1000.0
+    - gene_reaction_rule: ''
+    - objective_coefficient: 1.0
+    - annotation:
+        bigg.reaction: ATPM
+  - !!omap
+    - id: D_LACt2
+    - name: ''
+    - metabolites: !!omap
+      - h_e: -1
+      - lac__D_e: -1
+      - h_c: 1
+      - lac__D_c: 1
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b3603 or b2975
+  - !!omap
+    - id: ENO
+    - name: enolase
+    - metabolites: !!omap
+      - pep_c: 1.0
+      - h2o_c: 1.0
+      - 2pg_c: -1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b2779
+    - annotation:
+        bigg.reaction: ENO
+  - !!omap
+    - id: EX_glc__D_e
+    - name: D-Glucose exchange
+    - metabolites: !!omap
+      - glc__D_e: -1.0
+    - lower_bound: -10.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: ''
+    - annotation:
+        bigg.reaction: glc
+        SBO: SBO:0000627
+  - !!omap
+    - id: EX_h_e
+    - name: H+ exchange
+    - metabolites: !!omap
+      - h_e: -1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: ''
+    - annotation:
+        bigg.reaction: h
+        SBO: SBO:0000627
+  - !!omap
+    - id: EX_lac__D_e
+    - name: D-lactate exchange
+    - metabolites: !!omap
+      - lac__D_e: -1.0
+    - lower_bound: 0.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: ''
+  - !!omap
+    - id: FBA
+    - name: fructose-bisphosphate aldolase
+    - metabolites: !!omap
+      - dhap_c: 1.0
+      - fdp_c: -1.0
+      - g3p_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b1773 or b2097 or b2925
+    - annotation:
+        bigg.reaction: FBA
+  - !!omap
+    - id: GAPD
+    - name: glyceraldehyde-3-phosphate dehydrogenase
+    - metabolites: !!omap
+      - nad_c: -1.0
+      - 13dpg_c: 1.0
+      - g3p_c: -1.0
+      - nadh_c: 1.0
+      - h_c: 1.0
+      - pi_c: -1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b1779
+    - annotation:
+        bigg.reaction: GAPD
+  - !!omap
+    - id: GLCpts
+    - name: D-glucose transport via PEP:Pyr PTS
+    - metabolites: !!omap
+      - pep_c: -1.0
+      - g6p_c: 1.0
+      - glc__D_e: -1.0
+      - pyr_c: 1.0
+    - lower_bound: 0.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: ( b2417 and b1621 and b2415 and b2416 ) or ( b2417 and b1101
+        and b2415 and b2416 ) or ( b1817 and b1818 and b1819 and b2415 and b2416 )
+    - annotation:
+        bigg.reaction: GLCpts
+  - !!omap
+    - id: H2Ot
+    - name: R H2O transport via - diffusion
+    - metabolites: !!omap
+      - h2o_e: -1.0
+      - h2o_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b0875 or s0001
+    - annotation:
+        bigg.reaction: H2Ot
+  - !!omap
+    - id: LDH_D
+    - name: D-lactate dehydrogenase
+    - metabolites: !!omap
+      - nad_c: -1.0
+      - lac__D_c: -1.0
+      - h_c: 1.0
+      - nadh_c: 1.0
+      - pyr_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b2133 or b1380
+  - !!omap
+    - id: PFK
+    - name: phosphofructokinase
+    - metabolites: !!omap
+      - f6p_c: -1.0
+      - atp_c: -1.0
+      - fdp_c: 1.0
+      - h_c: 1.0
+      - adp_c: 1.0
+    - lower_bound: 0.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b3916 or b1723
+    - objective_coefficient: 1.0
+    - annotation:
+        bigg.reaction: PFK
+  - !!omap
+    - id: PGI
+    - name: glucose-6-phosphate isomerase
+    - metabolites: !!omap
+      - f6p_c: 1.0
+      - g6p_c: -1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b4025
+    - annotation:
+        bigg.reaction: PGI
+  - !!omap
+    - id: PGK
+    - name: phosphoglycerate kinase
+    - metabolites: !!omap
+      - 3pg_c: -1.0
+      - atp_c: -1.0
+      - 13dpg_c: 1.0
+      - adp_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b2926
+    - annotation:
+        bigg.reaction: PGK
+  - !!omap
+    - id: PGM
+    - name: phosphoglycerate mutase
+    - metabolites: !!omap
+      - 3pg_c: 1.0
+      - 2pg_c: -1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b4395 or b3612 or b0755
+    - annotation:
+        bigg.reaction: PGM
+  - !!omap
+    - id: PIt2r
+    - name: R phosphate reversible transport via - symport
+    - metabolites: !!omap
+      - h_c: 1.0
+      - pi_e: -1.0
+      - h_e: -1.0
+      - pi_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b2987 or b3493
+    - annotation:
+        bigg.reaction: PIt2r
+  - !!omap
+    - id: PYK
+    - name: pyruvate kinase
+    - metabolites: !!omap
+      - pep_c: -1.0
+      - h_c: -1.0
+      - atp_c: 1.0
+      - pyr_c: 1.0
+      - adp_c: -1.0
+    - lower_bound: 0.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b1854 or b1676
+    - annotation:
+        bigg.reaction: PYK
+  - !!omap
+    - id: TPI
+    - name: triose-phosphate isomerase
+    - metabolites: !!omap
+      - dhap_c: -1.0
+      - g3p_c: 1.0
+    - lower_bound: -1000.0
+    - upper_bound: 1000.0
+    - gene_reaction_rule: b3919
+    - annotation:
+        bigg.reaction: TPI
+- metabolites:
+  - !!omap
+    - id: 13dpg_c
+    - name: 3-Phospho-D-glyceroyl phosphate
+    - compartment: c
+    - charge: -4
+    - formula: C3H4O10P2
+    - annotation:
+        pubchem.substance: '3535'
+        biocyc: DPG
+        kegg.compound: C00236
+        seed.compound: cpd00203
+        reactome: REACT_29800
+        bigg.metabolite: 13dpg
+        hmdb: HMDB01270
+        chebi:
+        - CHEBI:16001
+        - CHEBI:1658
+        - CHEBI:20189
+        - CHEBI:57604
+        - CHEBI:11881
+        unipathway.compound: UPC00236
+  - !!omap
+    - id: 2pg_c
+    - name: D-Glycerate 2-phosphate
+    - compartment: c
+    - charge: -3
+    - formula: C3H4O7P
+    - annotation:
+        pubchem.substance: '3904'
+        biocyc: 2-PG
+        kegg.compound: C00631
+        seed.compound: cpd00482
+        reactome: REACT_30485
+        bigg.metabolite: 2pg
+        hmdb:
+        - HMDB03391
+        - HMDB00362
+        chebi:
+        - CHEBI:1267
+        - CHEBI:58289
+        - CHEBI:17835
+        - CHEBI:21028
+        - CHEBI:11651
+        - CHEBI:12986
+        - CHEBI:24344
+        - CHEBI:39868
+        unipathway.compound: UPC00631
+  - !!omap
+    - id: 3pg_c
+    - name: 3-Phospho-D-glycerate
+    - compartment: c
+    - charge: -3
+    - formula: C3H4O7P
+    - annotation:
+        pubchem.substance: '3497'
+        biocyc: G3P
+        kegg.compound:
+        - C00197
+        - C00597
+        seed.compound: cpd00169
+        reactome: REACT_29728
+        bigg.metabolite: 3pg
+        hmdb: HMDB00807
+        chebi:
+        - CHEBI:40016
+        - CHEBI:58272
+        - CHEBI:57998
+        - CHEBI:11879
+        - CHEBI:1657
+        - CHEBI:1659
+        - CHEBI:17050
+        - CHEBI:21029
+        - CHEBI:11882
+        - CHEBI:11880
+        - CHEBI:12987
+        - CHEBI:17794
+        - CHEBI:24345
+        unipathway.compound:
+        - UPC00597
+        - UPC00197
+  - !!omap
+    - id: adp_c
+    - name: ADP
+    - compartment: c
+    - charge: -3
+    - formula: C10H12N5O10P2
+    - annotation:
+        kegg.glycan: G11113
+        biocyc:
+        - ADP
+        - ADP-GROUP
+        chebi:
+        - CHEBI:13222
+        - CHEBI:16761
+        - CHEBI:2342
+        - CHEBI:22244
+        - CHEBI:40553
+        - CHEBI:456216
+        unipathway.compound: UPC00008
+        seed.compound: cpd00008
+        reactome:
+        - REACT_190072
+        - REACT_481002
+        - REACT_211606
+        - REACT_429160
+        - REACT_29370
+        - REACT_196180
+        - REACT_113581
+        - REACT_113582
+        - REACT_114564
+        - REACT_114565
+        - REACT_429153
+        bigg.metabolite: adp
+        hmdb: HMDB01341
+        pubchem.substance: '3310'
+        cas:
+        - 58-64-0
+        - 58-64-0
+        kegg.compound: C00008
+  - !!omap
+    - id: atp_c
+    - name: ATP
+    - compartment: c
+    - charge: -4
+    - formula: C10H12N5O13P3
+    - annotation:
+        pubchem.substance: '3304'
+        biocyc: ATP
+        chebi:
+        - CHEBI:40938
+        - CHEBI:15422
+        - CHEBI:57299
+        - CHEBI:13236
+        - CHEBI:10789
+        - CHEBI:30616
+        - CHEBI:22249
+        - CHEBI:10841
+        - CHEBI:2359
+        unipathway.compound: UPC00002
+        seed.compound: cpd00002
+        reactome:
+        - REACT_190078
+        - REACT_113592
+        - REACT_113593
+        - REACT_114570
+        - REACT_29358
+        - REACT_389573
+        - REACT_139836
+        - REACT_211579
+        bigg.metabolite: atp
+        hmdb: HMDB00538
+        kegg.drug: D08646
+        cas:
+        - 56-65-5
+        - 56-65-5
+        kegg.compound: C00002
+  - !!omap
+    - id: dhap_c
+    - name: Dihydroxyacetone phosphate
+    - compartment: c
+    - charge: -2
+    - formula: C3H5O6P
+    - annotation:
+        pubchem.substance: '3411'
+        biocyc: DIHYDROXY-ACETONE-PHOSPHATE
+        chebi:
+        - CHEBI:14341
+        - CHEBI:57642
+        - CHEBI:14342
+        - CHEBI:16108
+        - CHEBI:5454
+        - CHEBI:24355
+        - CHEBI:39571
+        unipathway.compound: UPC00111
+        seed.compound: cpd00095
+        reactome:
+        - REACT_188451
+        - REACT_75970
+        - REACT_390404
+        bigg.metabolite: dhap
+        hmdb:
+        - HMDB01473
+        - HMDB11735
+        cas:
+        - 57-04-5
+        - 57-04-5
+        kegg.compound: C00111
+  - !!omap
+    - id: f6p_c
+    - name: D-Fructose 6-phosphate
+    - compartment: c
+    - charge: -2
+    - formula: C6H11O9P
+    - annotation:
+        pubchem.substance: '3385'
+        biocyc: FRUCTOSE-6P
+        chebi:
+        - CHEBI:57634
+        - CHEBI:12352
+        - CHEBI:45804
+        - CHEBI:61527
+        - CHEBI:61553
+        - CHEBI:10375
+        - CHEBI:16084
+        - CHEBI:42378
+        - CHEBI:22768
+        unipathway.compound:
+        - UPC05345
+        - UPC00085
+        seed.compound: cpd00072
+        bigg.metabolite: f6p
+        hmdb: HMDB03971
+        cas:
+        - 643-13-0
+        - 643-13-0
+        kegg.compound:
+        - C05345
+        - C00085
+  - !!omap
+    - id: fdp_c
+    - name: D-Fructose 1,6-bisphosphate
+    - compartment: c
+    - charge: -4
+    - formula: C6H10O12P2
+    - annotation:
+        pubchem.substance: '3647'
+        biocyc: FRUCTOSE-16-DIPHOSPHATE
+        chebi:
+        - CHEBI:32968
+        - CHEBI:49299
+        - CHEBI:42553
+        - CHEBI:32966
+        - CHEBI:37736
+        - CHEBI:28013
+        - CHEBI:32967
+        - CHEBI:41014
+        - CHEBI:22767
+        - CHEBI:10374
+        - CHEBI:40595
+        - CHEBI:40591
+        unipathway.compound: UPC00354
+        seed.compound: cpd00290
+        bigg.metabolite: fdp
+        cas:
+        - 488-69-7
+        - 488-69-7
+        kegg.compound:
+        - C05378
+        - C00354
+  - !!omap
+    - id: g3p_c
+    - name: Glyceraldehyde 3-phosphate
+    - compartment: c
+    - charge: -2
+    - formula: C3H5O6P
+    - annotation:
+        pubchem.substance: '3930'
+        chebi:
+        - CHEBI:17138
+        - CHEBI:14333
+        - CHEBI:5446
+        - CHEBI:58027
+        unipathway.compound:
+        - UPC00661
+        - UPC00118
+        seed.compound: cpd00102
+        bigg.metabolite: g3p
+        hmdb: HMDB01112
+        cas:
+        - 142-10-9
+        - 142-10-9
+        kegg.compound:
+        - C00661
+        - C00118
+  - !!omap
+    - id: g6p_c
+    - name: D-Glucose 6-phosphate
+    - compartment: c
+    - charge: -2
+    - formula: C6H11O9P
+    - annotation:
+        pubchem.substance: '3392'
+        biocyc:
+        - D-glucose-6-phosphate
+        - GLC-6-P
+        chebi:
+        - CHEBI:10399
+        - CHEBI:22797
+        - CHEBI:41041
+        - CHEBI:17719
+        - CHEBI:4170
+        - CHEBI:61548
+        - CHEBI:58247
+        - CHEBI:12375
+        unipathway.compound: UPC00092
+        seed.compound: cpd00079
+        reactome: REACT_1629756
+        bigg.metabolite: g6p
+        hmdb:
+        - HMDB03498
+        - HMDB06793
+        - HMDB01401
+        - HMDB01549
+        cas:
+        - 56-73-5
+        - 56-73-5
+        kegg.compound:
+        - C00092
+        - C01172
+  - !!omap
+    - id: glc__D_e
+    - name: D-Glucose
+    - compartment: e
+    - charge: 0
+    - formula: C6H12O6
+    - annotation:
+        bigg.metabolite: glc__D
+        pubchem.substance: '3333'
+        cas:
+        - 50-99-7
+        - 50-99-7
+        kegg.compound: C00031
+  - !!omap
+    - id: h2o_c
+    - name: H2O
+    - compartment: c
+    - charge: 0
+    - formula: H2O
+    - annotation:
+        pubchem.substance: '3303'
+        biocyc:
+        - WATER
+        - OH
+        - OXONIUM
+        chebi:
+        - CHEBI:15377
+        - CHEBI:13365
+        - CHEBI:41979
+        - CHEBI:16234
+        - CHEBI:36385
+        - CHEBI:42857
+        - CHEBI:27313
+        - CHEBI:44819
+        - CHEBI:29373
+        - CHEBI:10743
+        - CHEBI:5594
+        - CHEBI:29356
+        - CHEBI:53442
+        - CHEBI:29375
+        - CHEBI:29374
+        - CHEBI:13419
+        - CHEBI:43228
+        - CHEBI:44292
+        - CHEBI:13352
+        - CHEBI:41981
+        - CHEBI:29412
+        - CHEBI:42043
+        - CHEBI:33811
+        - CHEBI:33813
+        - CHEBI:35511
+        - CHEBI:5585
+        - CHEBI:44641
+        - CHEBI:44701
+        unipathway.compound:
+        - UPC00001
+        - UPC01328
+        seed.compound:
+        - cpd15275
+        - cpd00001
+        reactome:
+        - REACT_947593
+        - REACT_189422
+        - REACT_141343
+        - REACT_113518
+        - REACT_1605715
+        - REACT_109276
+        - REACT_113521
+        - REACT_113519
+        - REACT_2022884
+        - REACT_351603
+        - REACT_29356
+        bigg.metabolite: h2o
+        hmdb:
+        - HMDB01039
+        - HMDB02111
+        kegg.drug:
+        - D00001
+        - D06322
+        - D03703
+        cas:
+        - 7732-18-5
+        - 7732-18-5
+        kegg.compound:
+        - C01328
+        - C00001
+        - C18714
+        - C18712
+  - !!omap
+    - id: h2o_e
+    - name: H2O
+    - compartment: e
+    - charge: 0
+    - formula: H2O
+    - annotation:
+        pubchem.substance: '3303'
+        biocyc:
+        - WATER
+        - OH
+        - OXONIUM
+        chebi:
+        - CHEBI:15377
+        - CHEBI:13365
+        - CHEBI:41979
+        - CHEBI:16234
+        - CHEBI:36385
+        - CHEBI:42857
+        - CHEBI:27313
+        - CHEBI:44819
+        - CHEBI:29373
+        - CHEBI:10743
+        - CHEBI:5594
+        - CHEBI:29356
+        - CHEBI:53442
+        - CHEBI:29375
+        - CHEBI:29374
+        - CHEBI:13419
+        - CHEBI:43228
+        - CHEBI:44292
+        - CHEBI:13352
+        - CHEBI:41981
+        - CHEBI:29412
+        - CHEBI:42043
+        - CHEBI:33811
+        - CHEBI:33813
+        - CHEBI:35511
+        - CHEBI:5585
+        - CHEBI:44641
+        - CHEBI:44701
+        unipathway.compound:
+        - UPC00001
+        - UPC01328
+        seed.compound:
+        - cpd15275
+        - cpd00001
+        reactome:
+        - REACT_947593
+        - REACT_189422
+        - REACT_141343
+        - REACT_113518
+        - REACT_1605715
+        - REACT_109276
+        - REACT_113521
+        - REACT_113519
+        - REACT_2022884
+        - REACT_351603
+        - REACT_29356
+        bigg.metabolite: h2o
+        hmdb:
+        - HMDB01039
+        - HMDB02111
+        kegg.drug:
+        - D00001
+        - D06322
+        - D03703
+        cas:
+        - 7732-18-5
+        - 7732-18-5
+        kegg.compound:
+        - C01328
+        - C00001
+        - C18714
+        - C18712
+  - !!omap
+    - id: h_c
+    - name: H+
+    - compartment: c
+    - charge: 1
+    - formula: H
+    - annotation:
+        pubchem.substance: '3380'
+        biocyc: PROTON
+        chebi:
+        - CHEBI:24636
+        - CHEBI:15378
+        - CHEBI:10744
+        - CHEBI:13357
+        - CHEBI:5584
+        unipathway.compound: UPC00080
+        seed.compound: cpd00067
+        reactome:
+        - REACT_194688
+        - REACT_425978
+        - REACT_193465
+        - REACT_374900
+        - REACT_74722
+        - REACT_425999
+        - REACT_428040
+        - REACT_163953
+        - REACT_372511
+        - REACT_2000349
+        - REACT_70106
+        - REACT_1470067
+        - REACT_113529
+        - REACT_425969
+        - REACT_428548
+        - REACT_156540
+        - REACT_1614597
+        - REACT_351626
+        - REACT_427899
+        bigg.metabolite: h
+        cas:
+        - 12408-02-5
+        - 12408-02-5
+        kegg.compound: C00080
+  - !!omap
+    - id: h_e
+    - name: H+
+    - compartment: e
+    - charge: 1
+    - formula: H
+    - annotation:
+        pubchem.substance: '3380'
+        biocyc: PROTON
+        chebi:
+        - CHEBI:24636
+        - CHEBI:15378
+        - CHEBI:10744
+        - CHEBI:13357
+        - CHEBI:5584
+        unipathway.compound: UPC00080
+        seed.compound: cpd00067
+        reactome:
+        - REACT_194688
+        - REACT_425978
+        - REACT_193465
+        - REACT_374900
+        - REACT_74722
+        - REACT_425999
+        - REACT_428040
+        - REACT_163953
+        - REACT_372511
+        - REACT_2000349
+        - REACT_70106
+        - REACT_1470067
+        - REACT_113529
+        - REACT_425969
+        - REACT_428548
+        - REACT_156540
+        - REACT_1614597
+        - REACT_351626
+        - REACT_427899
+        bigg.metabolite: h
+        cas:
+        - 12408-02-5
+        - 12408-02-5
+        kegg.compound: C00080
+  - !!omap
+    - id: lac__D_c
+    - name: D-Lactate
+    - compartment: c
+    - charge: -1
+    - formula: C3H5O3
+  - !!omap
+    - id: lac__D_e
+    - name: D-Lactate
+    - compartment: e
+    - charge: -1
+    - formula: C3H5O3
+  - !!omap
+    - id: nad_c
+    - name: Nicotinamide adenine dinucleotide
+    - compartment: c
+    - charge: -1
+    - formula: C21H26N7O14P2
+    - annotation:
+        pubchem.substance: '3305'
+        biocyc: NAD
+        chebi:
+        - CHEBI:21901
+        - CHEBI:7422
+        - CHEBI:44214
+        - CHEBI:15846
+        - CHEBI:13394
+        - CHEBI:13393
+        - CHEBI:44215
+        - CHEBI:13389
+        - CHEBI:57540
+        - CHEBI:44281
+        unipathway.compound: UPC00003
+        seed.compound: cpd00003
+        reactome:
+        - REACT_192307
+        - REACT_29360
+        - REACT_427523
+        - REACT_194653
+        - REACT_113526
+        bigg.metabolite: nad
+        hmdb: HMDB00902
+        kegg.drug: D00002
+        cas:
+        - 53-84-9
+        - 53-84-9
+        kegg.compound: C00003
+  - !!omap
+    - id: nadh_c
+    - name: Nicotinamide adenine dinucleotide - reduced
+    - compartment: c
+    - charge: -2
+    - formula: C21H27N7O14P2
+    - annotation:
+        pubchem.substance: '3306'
+        biocyc: NADH
+        chebi:
+        - CHEBI:13395
+        - CHEBI:21902
+        - CHEBI:16908
+        - CHEBI:7423
+        - CHEBI:44216
+        - CHEBI:57945
+        - CHEBI:13396
+        unipathway.compound: UPC00004
+        seed.compound: cpd00004
+        reactome:
+        - REACT_192305
+        - REACT_73473
+        - REACT_194697
+        - REACT_29362
+        bigg.metabolite: nadh
+        hmdb: HMDB01487
+        cas:
+        - 58-68-4
+        - 58-68-4
+        kegg.compound: C00004
+  - !!omap
+    - id: pep_c
+    - name: Phosphoenolpyruvate
+    - compartment: c
+    - charge: -3
+    - formula: C3H2O6P
+    - annotation:
+        pubchem.substance: '3374'
+        biocyc: PHOSPHO-ENOL-PYRUVATE
+        chebi:
+        - CHEBI:44897
+        - CHEBI:44894
+        - CHEBI:14812
+        - CHEBI:8147
+        - CHEBI:26055
+        - CHEBI:26054
+        - CHEBI:58702
+        - CHEBI:18021
+        unipathway.compound: UPC00074
+        seed.compound: cpd00061
+        reactome:
+        - REACT_29492
+        - REACT_372364
+        bigg.metabolite: pep
+        hmdb: HMDB00263
+        cas:
+        - 138-08-9
+        - 138-08-9
+        kegg.compound: C00074
+  - !!omap
+    - id: pi_c
+    - name: Phosphate
+    - compartment: c
+    - charge: -2
+    - formula: HO4P
+    - annotation:
+        pubchem.substance: '3311'
+        biocyc:
+        - Pi
+        - PHOSPHATE-GROUP
+        - CPD0-1421
+        chebi:
+        - CHEBI:37583
+        - CHEBI:7793
+        - CHEBI:37585
+        - CHEBI:34683
+        - CHEBI:14791
+        - CHEBI:34855
+        - CHEBI:29137
+        - CHEBI:29139
+        - CHEBI:63036
+        - CHEBI:26020
+        - CHEBI:39739
+        - CHEBI:32597
+        - CHEBI:32596
+        - CHEBI:43474
+        - CHEBI:63051
+        - CHEBI:43470
+        - CHEBI:9679
+        - CHEBI:35433
+        - CHEBI:4496
+        - CHEBI:45024
+        - CHEBI:18367
+        - CHEBI:26078
+        - CHEBI:39745
+        - CHEBI:24838
+        unipathway.compound: UPC00009
+        seed.compound:
+        - cpd09464
+        - cpd09463
+        - cpd00009
+        reactome:
+        - REACT_947590
+        - REACT_109277
+        - REACT_113548
+        - REACT_2255331
+        - REACT_29372
+        - REACT_113550
+        - REACT_113551
+        bigg.metabolite: pi
+        hmdb: HMDB02142
+        kegg.drug: D05467
+        cas:
+        - 14265-44-2
+        - 14265-44-2
+        kegg.compound:
+        - C13556
+        - C13558
+        - C00009
+  - !!omap
+    - id: pi_e
+    - name: Phosphate
+    - compartment: e
+    - charge: -2
+    - formula: HO4P
+    - annotation:
+        pubchem.substance: '3311'
+        biocyc:
+        - Pi
+        - PHOSPHATE-GROUP
+        - CPD0-1421
+        chebi:
+        - CHEBI:37583
+        - CHEBI:7793
+        - CHEBI:37585
+        - CHEBI:34683
+        - CHEBI:14791
+        - CHEBI:34855
+        - CHEBI:29137
+        - CHEBI:29139
+        - CHEBI:63036
+        - CHEBI:26020
+        - CHEBI:39739
+        - CHEBI:32597
+        - CHEBI:32596
+        - CHEBI:43474
+        - CHEBI:63051
+        - CHEBI:43470
+        - CHEBI:9679
+        - CHEBI:35433
+        - CHEBI:4496
+        - CHEBI:45024
+        - CHEBI:18367
+        - CHEBI:26078
+        - CHEBI:39745
+        - CHEBI:24838
+        unipathway.compound: UPC00009
+        seed.compound:
+        - cpd09464
+        - cpd09463
+        - cpd00009
+        reactome:
+        - REACT_947590
+        - REACT_109277
+        - REACT_113548
+        - REACT_2255331
+        - REACT_29372
+        - REACT_113550
+        - REACT_113551
+        bigg.metabolite: pi
+        hmdb: HMDB02142
+        kegg.drug: D05467
+        cas:
+        - 14265-44-2
+        - 14265-44-2
+        kegg.compound:
+        - C13556
+        - C13558
+        - C00009
+  - !!omap
+    - id: pyr_c
+    - name: Pyruvate
+    - compartment: c
+    - charge: -1
+    - formula: C3H3O3
+    - annotation:
+        pubchem.substance: '3324'
+        biocyc: PYRUVATE
+        chebi:
+        - CHEBI:15361
+        - CHEBI:14987
+        - CHEBI:8685
+        - CHEBI:32816
+        - CHEBI:45253
+        - CHEBI:26466
+        - CHEBI:26462
+        lipidmaps: LMFA01060077
+        seed.compound: cpd00020
+        kegg.compound: C00022
+        reactome:
+        - REACT_113557
+        - REACT_389680
+        - REACT_29398
+        bigg.metabolite: pyr
+        hmdb: HMDB00243
+        cas:
+        - 127-17-3
+        - 127-17-3
+        unipathway.compound: UPC00022
+- genes:
+  - !!omap
+    - id: b0755
+    - name: gpmA
+  - !!omap
+    - id: b0875
+    - name: aqpZ
+  - !!omap
+    - id: b1101
+    - name: ptsG
+  - !!omap
+    - id: b1380
+    - name: ldhA
+  - !!omap
+    - id: b1621
+    - name: malX
+  - !!omap
+    - id: b1676
+    - name: pykF
+    - annotation:
+        ncbigi:
+        - GI:1208453
+        - GI:1652654
+  - !!omap
+    - id: b1723
+    - name: pfkB
+  - !!omap
+    - id: b1773
+    - name: ydjI
+  - !!omap
+    - id: b1779
+    - name: gapA
+  - !!omap
+    - id: b1817
+    - name: manX
+  - !!omap
+    - id: b1818
+    - name: manY
+  - !!omap
+    - id: b1819
+    - name: manZ
+  - !!omap
+    - id: b1854
+    - name: pykA
+  - !!omap
+    - id: b2097
+    - name: fbaB
+  - !!omap
+    - id: b2133
+    - name: dld
+  - !!omap
+    - id: b2415
+    - name: ptsH
+  - !!omap
+    - id: b2416
+    - name: ptsI
+  - !!omap
+    - id: b2417
+    - name: crr
+  - !!omap
+    - id: b2779
+    - name: eno
+    - annotation:
+        ncbigi: GI:1653839
+  - !!omap
+    - id: b2925
+    - name: fbaA
+  - !!omap
+    - id: b2926
+    - name: pgk
+    - annotation:
+        ncbigi: GI:1653609
+  - !!omap
+    - id: b2975
+    - name: glcA
+  - !!omap
+    - id: b2987
+    - name: pitB
+  - !!omap
+    - id: b3493
+    - name: pitA
+  - !!omap
+    - id: b3603
+    - name: lldP
+  - !!omap
+    - id: b3612
+    - name: gpmM
+  - !!omap
+    - id: b3916
+    - name: pfkA
+    - annotation:
+        ncbigi:
+        - GI:1006614
+        - GI:1651919
+  - !!omap
+    - id: b3919
+    - name: tpiA
+  - !!omap
+    - id: b4025
+    - name: pgi
+    - annotation:
+        ncbigi: GI:1653253
+  - !!omap
+    - id: b4395
+    - name: ytjC
+  - !!omap
+    - id: s0001
+    - name: G_s0001
+- id: mini_textbook
+- compartments:
+    e: extracellular
+    c: cytosol
+- version: 1
diff --git a/cobra/test/data/mini_cobra.xml b/cobra/test/data/mini_cobra.xml
index 4a136b3..48b0229 100644
--- a/cobra/test/data/mini_cobra.xml
+++ b/cobra/test/data/mini_cobra.xml
@@ -206,8 +206,8 @@
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_adp_c" stoichiometry="1"/>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_pi_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -215,9 +215,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="8.39" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="1" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="1" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -241,9 +241,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -257,8 +257,8 @@
           <speciesReference species="M_2pg_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_pep_c" stoichiometry="1"/>
           <speciesReference species="M_h2o_c" stoichiometry="1"/>
+          <speciesReference species="M_pep_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -266,9 +266,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -285,9 +285,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-10" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -304,9 +304,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -323,9 +323,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="0" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -339,8 +339,8 @@
           <speciesReference species="M_fdp_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_dhap_c" stoichiometry="1"/>
           <speciesReference species="M_g3p_c" stoichiometry="1"/>
+          <speciesReference species="M_dhap_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -348,9 +348,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -361,13 +361,13 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_g3p_c" stoichiometry="1"/>
           <speciesReference species="M_nad_c" stoichiometry="1"/>
           <speciesReference species="M_pi_c" stoichiometry="1"/>
+          <speciesReference species="M_g3p_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_13dpg_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_nadh_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
@@ -376,9 +376,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -393,8 +393,8 @@
           <speciesReference species="M_glc__D_e" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_pyr_c" stoichiometry="1"/>
           <speciesReference species="M_g6p_c" stoichiometry="1"/>
+          <speciesReference species="M_pyr_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -402,9 +402,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="0" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -426,9 +426,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -439,13 +439,13 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_lac__D_c" stoichiometry="1"/>
           <speciesReference species="M_nad_c" stoichiometry="1"/>
+          <speciesReference species="M_lac__D_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
-          <speciesReference species="M_pyr_c" stoichiometry="1"/>
           <speciesReference species="M_nadh_c" stoichiometry="1"/>
+          <speciesReference species="M_pyr_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -453,9 +453,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -466,13 +466,13 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_f6p_c" stoichiometry="1"/>
           <speciesReference species="M_atp_c" stoichiometry="1"/>
+          <speciesReference species="M_f6p_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_adp_c" stoichiometry="1"/>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_fdp_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -480,9 +480,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="0" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="1" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -504,9 +504,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -517,8 +517,8 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_3pg_c" stoichiometry="1"/>
           <speciesReference species="M_atp_c" stoichiometry="1"/>
+          <speciesReference species="M_3pg_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_13dpg_c" stoichiometry="1"/>
@@ -530,9 +530,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -554,9 +554,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -567,12 +567,12 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_h_e" stoichiometry="1"/>
           <speciesReference species="M_pi_e" stoichiometry="1"/>
+          <speciesReference species="M_h_e" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_pi_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -580,9 +580,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -594,12 +594,12 @@
         </notes>
         <listOfReactants>
           <speciesReference species="M_adp_c" stoichiometry="1"/>
-          <speciesReference species="M_h_c" stoichiometry="1"/>
           <speciesReference species="M_pep_c" stoichiometry="1"/>
+          <speciesReference species="M_h_c" stoichiometry="1"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_pyr_c" stoichiometry="1"/>
           <speciesReference species="M_atp_c" stoichiometry="1"/>
+          <speciesReference species="M_pyr_c" stoichiometry="1"/>
         </listOfProducts>
         <kineticLaw>
           <math xmlns="http://www.w3.org/1998/Math/MathML">
@@ -607,9 +607,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="0" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
@@ -631,9 +631,9 @@
           </math>
           <listOfParameters>
             <parameter id="LOWER_BOUND" value="-1000" units="mmol_per_gDW_per_hr"/>
-            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
-            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
             <parameter id="FLUX_VALUE" value="0" units="mmol_per_gDW_per_hr"/>
+            <parameter id="UPPER_BOUND" value="1000" units="mmol_per_gDW_per_hr"/>
+            <parameter id="OBJECTIVE_COEFFICIENT" value="0" units="dimensionless"/>
           </listOfParameters>
         </kineticLaw>
       </reaction>
diff --git a/cobra/test/data/mini_fbc1.xml b/cobra/test/data/mini_fbc1.xml
index 9209063..58fb4bc 100644
--- a/cobra/test/data/mini_fbc1.xml
+++ b/cobra/test/data/mini_fbc1.xml
@@ -192,21 +192,21 @@
           </html>
         </notes>
       </species>
-      <species id="M_glc__D_e" name="D-Glucose" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:chemicalFormula="C6H12O6">
+      <species id="M_glc__D_e" name="D-Glucose" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0" fbc:chemicalFormula="C6H12O6">
         <notes>
           <html xmlns="http://www.w3.org/1999/xhtml">
             <p>FORMULA: C6H12O6</p>
           </html>
         </notes>
       </species>
-      <species id="M_h2o_c" name="H2O" compartment="c" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:chemicalFormula="H2O">
+      <species id="M_h2o_c" name="H2O" compartment="c" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0" fbc:chemicalFormula="H2O">
         <notes>
           <html xmlns="http://www.w3.org/1999/xhtml">
             <p>FORMULA: H2O</p>
           </html>
         </notes>
       </species>
-      <species id="M_h2o_e" name="H2O" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:chemicalFormula="H2O">
+      <species id="M_h2o_e" name="H2O" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0" fbc:chemicalFormula="H2O">
         <notes>
           <html xmlns="http://www.w3.org/1999/xhtml">
             <p>FORMULA: H2O</p>
@@ -283,7 +283,7 @@
           </html>
         </notes>
       </species>
-      <species id="M_glc__D_e_boundary" name="D-Glucose" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false" fbc:chemicalFormula="C6H12O6">
+      <species id="M_glc__D_e_boundary" name="D-Glucose" compartment="e" substanceUnits="substance" hasOnlySubstanceUnits="false" boundaryCondition="true" constant="false" fbc:charge="0" fbc:chemicalFormula="C6H12O6">
         <notes>
           <html xmlns="http://www.w3.org/1999/xhtml">
             <p>FORMULA: C6H12O6</p>
@@ -312,9 +312,9 @@
           <speciesReference species="M_h2o_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
+          <speciesReference species="M_adp_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_pi_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
-          <speciesReference species="M_adp_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_D_LACt2" name="D_LACt2" reversible="true" fast="false">
@@ -328,8 +328,8 @@
           <speciesReference species="M_lac__D_e" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_lac__D_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_ENO" name="enolase" reversible="true" fast="false">
@@ -380,8 +380,8 @@
           <speciesReference species="M_fdp_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_dhap_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_g3p_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_dhap_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_GAPD" name="glyceraldehyde-3-phosphate dehydrogenase" reversible="true" fast="false">
@@ -408,8 +408,8 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_glc__D_e" stoichiometry="1" constant="true"/>
           <speciesReference species="M_pep_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_glc__D_e" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_g6p_c" stoichiometry="1" constant="true"/>
@@ -440,9 +440,9 @@
           <speciesReference species="M_lac__D_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
-          <speciesReference species="M_pyr_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_nadh_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_pyr_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_PFK" name="phosphofructokinase" reversible="false" fast="false">
@@ -452,13 +452,13 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_f6p_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_atp_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_f6p_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_adp_c" stoichiometry="1" constant="true"/>
-          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_fdp_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_PGI" name="glucose-6-phosphate isomerase" reversible="true" fast="false">
@@ -481,8 +481,8 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_3pg_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_atp_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_3pg_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
           <speciesReference species="M_13dpg_c" stoichiometry="1" constant="true"/>
@@ -509,12 +509,12 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_h_e" stoichiometry="1" constant="true"/>
           <speciesReference species="M_pi_e" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_e" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_pi_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_PYK" name="pyruvate kinase" reversible="false" fast="false">
@@ -524,13 +524,13 @@
           </html>
         </notes>
         <listOfReactants>
-          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_adp_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_pep_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_h_c" stoichiometry="1" constant="true"/>
         </listOfReactants>
         <listOfProducts>
-          <speciesReference species="M_pyr_c" stoichiometry="1" constant="true"/>
           <speciesReference species="M_atp_c" stoichiometry="1" constant="true"/>
+          <speciesReference species="M_pyr_c" stoichiometry="1" constant="true"/>
         </listOfProducts>
       </reaction>
       <reaction id="R_TPI" name="triose-phosphate isomerase" reversible="true" fast="false">
@@ -589,6 +589,7 @@
       <fbc:objective fbc:id="obj" fbc:type="maximize">
         <fbc:listOfFluxObjectives>
           <fbc:fluxObjective fbc:reaction="R_ATPM" fbc:coefficient="1"/>
+          <fbc:fluxObjective fbc:reaction="R_PFK" fbc:coefficient="1"/>
         </fbc:listOfFluxObjectives>
       </fbc:objective>
     </fbc:listOfObjectives>
diff --git a/cobra/test/data/mini_fbc2.xml b/cobra/test/data/mini_fbc2.xml
index 303499b..53ab76d 100644
--- a/cobra/test/data/mini_fbc2.xml
+++ b/cobra/test/data/mini_fbc2.xml
@@ -14,6 +14,7 @@
       <fbc:objective fbc:id="obj" fbc:type="maximize">
         <fbc:listOfFluxObjectives>
           <fbc:fluxObjective fbc:reaction="R_ATPM" fbc:coefficient="1"/>
+          <fbc:fluxObjective fbc:reaction="R_PFK" fbc:coefficient="1"/>
         </fbc:listOfFluxObjectives>
       </fbc:objective>
     </fbc:listOfObjectives>
diff --git a/cobra/test/data/mini_fbc2.xml.bz2 b/cobra/test/data/mini_fbc2.xml.bz2
index 7fe05c6..a9b1167 100644
Binary files a/cobra/test/data/mini_fbc2.xml.bz2 and b/cobra/test/data/mini_fbc2.xml.bz2 differ
diff --git a/cobra/test/data/mini_fbc2.xml.gz b/cobra/test/data/mini_fbc2.xml.gz
index 89af7b4..16d383c 100644
Binary files a/cobra/test/data/mini_fbc2.xml.gz and b/cobra/test/data/mini_fbc2.xml.gz differ
diff --git a/cobra/test/data/raven.pickle b/cobra/test/data/raven.pickle
index 8391da9..9750952 100644
Binary files a/cobra/test/data/raven.pickle and b/cobra/test/data/raven.pickle differ
diff --git a/cobra/test/data/salmonella.pickle b/cobra/test/data/salmonella.pickle
index 5b1ef60..3848bdb 100644
Binary files a/cobra/test/data/salmonella.pickle and b/cobra/test/data/salmonella.pickle differ
diff --git a/cobra/test/data/textbook_fva.json b/cobra/test/data/textbook_fva.json
index c4c8438..f31092a 100644
--- a/cobra/test/data/textbook_fva.json
+++ b/cobra/test/data/textbook_fva.json
@@ -1 +1 @@
-{"ACALD": {"minimum": 0.0, "maximum": 0.0}, "ACALDt": {"minimum": 0.0, "maximum": -0.0}, "ACKr": {"minimum": 0.0, "maximum": -0.0}, "ACONTa": {"minimum": 6.00725, "maximum": 6.00725}, "ACONTb": {"minimum": 6.00725, "maximum": 6.00725}, "ACt2r": {"minimum": 0.0, "maximum": 0.0}, "ADK1": {"minimum": 0.0, "maximum": -0.0}, "AKGDH": {"minimum": 5.06438, "maximum": 5.06438}, "AKGt2r": {"minimum": 0.0, "maximum": 0.0}, "ALCD2x": {"minimum": 0.0, "maximum": 0.0}, "ATPM": {"minimum": 8.39, "maxi [...]
\ No newline at end of file
+{"maximum": {"EX_fum_e": 0.0, "ACALDt": 0.0, "EX_glc__D_e": -10.0, "EX_mal__L_e": -0.0, "ADK1": -0.0, "ICL": -0.0, "TALA": 1.49698, "EX_ac_e": -0.0, "PGI": 4.86086, "ACKr": 0.0, "NADTRHD": -0.0, "SUCCt2_2": -0.0, "O2t": 21.79949, "EX_co2_e": 22.80983, "PTAr": -0.0, "EX_h2o_e": 29.17583, "GLUDy": -4.54186, "ACONTa": 6.00725, "GLCpts": 10.0, "GAPD": 16.02353, "TKT1": 1.49698, "TKT2": 1.1815, "NADH16": 38.53461, "EX_etoh_e": -0.0, "ME1": -0.0, "FBP": -0.0, "GLUt2r": 0.0, "SUCDi": 1000.0, "E [...]
\ No newline at end of file
diff --git a/cobra/test/data/textbook_pfba_fva.json b/cobra/test/data/textbook_pfba_fva.json
new file mode 100644
index 0000000..c8acb02
--- /dev/null
+++ b/cobra/test/data/textbook_pfba_fva.json
@@ -0,0 +1 @@
+{"maximum": {"ACALD": 0.0, "ACALDt": 0.0, "ACKr": 0.0, "ACONTa": 6.00725, "ACONTb": 6.00725, "ACt2r": 0.0, "ADK1": -0.0, "AKGDH": 5.06438, "AKGt2r": 0.0, "ALCD2x": -0.0, "ATPM": 8.39, "ATPS4r": 45.51401, "Biomass_Ecoli_core": 0.87392, "CO2t": -22.80983, "CS": 6.00725, "CYTBD": 43.59899, "D_LACt2": 0.0, "ENO": 14.71614, "ETOHt2r": -0.0, "EX_ac_e": -0.0, "EX_acald_e": -0.0, "EX_akg_e": -0.0, "EX_co2_e": 22.80983, "EX_etoh_e": -0.0, "EX_for_e": -0.0, "EX_fru_e": -0.0, "EX_fum_e": 0.0, "EX_g [...]
\ No newline at end of file
diff --git a/cobra/test/data/textbook_solution.pickle b/cobra/test/data/textbook_solution.pickle
index 889ab38..f5e453d 100644
Binary files a/cobra/test/data/textbook_solution.pickle and b/cobra/test/data/textbook_solution.pickle differ
diff --git a/cobra/test/data/update_pickles.py b/cobra/test/data/update_pickles.py
index 8ea205e..5d28025 100755
--- a/cobra/test/data/update_pickles.py
+++ b/cobra/test/data/update_pickles.py
@@ -1,4 +1,17 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from collections import OrderedDict
+from json import dump as json_dump
+
+import cobra
+from cobra.io import (
+    load_matlab_model, read_sbml_model, save_json_model, save_matlab_model,
+    write_sbml_model)
+from cobra.io.sbml3 import write_sbml2
+
 # This script regenerates pickles of cobra Models.  Should be
 # performed after updating core classes to prevent subtle bugs.
 try:
@@ -6,14 +19,6 @@ try:
 except:
     from pickle import load, dump
 
-from json import dump as json_dump
-from collections import OrderedDict
-
-import cobra
-from cobra.version import get_version
-from cobra.io import read_sbml_model, write_sbml_model, save_matlab_model, \
-    save_json_model, load_matlab_model
-from cobra.io.sbml3 import write_sbml2
 
 # ecoli
 ecoli_model = read_sbml_model("iJO1366.xml")
@@ -43,7 +48,7 @@ for r in textbook.reactions:
                 "PIt2r"):
         mini.add_reaction(r.copy())
 mini.reactions.ATPM.upper_bound = mini.reactions.PGI.upper_bound
-mini.change_objective("ATPM")  # No biomass
+mini.objective = ["PFK", "ATPM"]  # No biomass, 2 reactions
 
 # add in some information from iJO1366
 mini.add_reaction(ecoli_model.reactions.LDH_D.copy())
@@ -85,6 +90,8 @@ raven = load_matlab_model("raven.mat")
 with open("raven.pickle", "wb") as outfile:
     dump(raven, outfile, protocol=2)
 
+# TODO: these need a reference solution rather than circular solution checking!
+
 # fva results
 fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
 clean_result = OrderedDict()
@@ -93,7 +100,16 @@ for key in sorted(fva_result):
 with open("textbook_fva.json", "w") as outfile:
     json_dump(clean_result, outfile)
 
+# fva with pfba constraint
+fva_result = cobra.flux_analysis.flux_variability_analysis(textbook,
+                                                           pfba_factor=1.1)
+clean_result = OrderedDict()
+for key in sorted(fva_result):
+    clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
+with open("textbook_pfba_fva.json", "w") as outfile:
+    json_dump(clean_result, outfile)
+
 # textbook solution
-cobra.flux_analysis.parsimonious.optimize_minimal_flux(textbook)
+solution = cobra.flux_analysis.parsimonious.pfba(textbook)
 with open('textbook_solution.pickle', 'wb') as f:
-    dump(textbook.solution, f, protocol=2)
+    dump(solution, f, protocol=2)
diff --git a/cobra/test/test_design.py b/cobra/test/test_design.py
deleted file mode 100644
index 4c65565..0000000
--- a/cobra/test/test_design.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pytest
-
-from cobra.design import *
-from cobra.design.design_algorithms import _add_decision_variable
-from cobra.solvers import get_solver_name
-from .conftest import model
-
-try:
-    solver = get_solver_name(mip=True)
-except ImportError:
-    no_mip_solver = True
-else:
-    no_mip_solver = False
-
-
-class TestDesignAlgorithms:
-    """Test functions in cobra.design"""
-    def test_dual(self, model):
-        assert abs(model.optimize("maximize").f - 0.874) < 0.001
-        dual = dual_problem(model)
-        assert abs(dual.optimize("minimize").f - 0.874) < 0.001
-
-    def test_dual_integer_vars_as_lp(self, model):
-        var = _add_decision_variable(model, "AKGDH")
-        assert abs(model.optimize("maximize").f - 0.874) < 0.001
-        # as lp: make integer continuous, set to 1
-        dual = dual_problem(model, "maximize", [var.id], copy=True)
-        r = dual.reactions.get_by_id(var.id)
-        r.variable_kind = "continuous"
-        r.lower_bound = r.upper_bound = 1
-        assert abs(dual.optimize("minimize").f - 0.874) < 0.001
-        r.lower_bound = r.upper_bound = 0
-        assert abs(dual.optimize("minimize").f - 0.858) < 0.001
-
-    @pytest.mark.skipif(no_mip_solver, reason="no MILP solver found")
-    def test_dual_integer_vars_as_mip(self, model):
-        # mip
-        var = _add_decision_variable(model, "AKGDH")
-        dual = dual_problem(model, "maximize", [var.id], copy=True)
-        var_in_dual = dual.reactions.get_by_id(var.id)
-        # minimization, so the optimal value state is to turn off AKGDH
-        assert abs(dual.optimize("minimize").f - 0.858) < 0.001
-        # turn off AKGDH in dual
-        var_in_dual.lower_bound = var_in_dual.upper_bound = 1
-        assert abs(dual.optimize("minimize").f - 0.874) < 0.001
-        # turn on AKGDH in dual
-        var_in_dual.lower_bound = var_in_dual.upper_bound = 0
-        assert abs(dual.optimize("minimize").f - 0.858) < 0.001
-
-    @pytest.mark.skipif(no_mip_solver, reason="no MILP solver found")
-    def test_optknock(self, model):
-        model.reactions.get_by_id("EX_o2_e").lower_bound = 0
-        knockable_reactions = ["ACKr", "AKGDH", "ACALD", "LDH_D"]
-        optknock_problem = set_up_optknock(model, "EX_lac__D_e",
-                                           knockable_reactions, n_knockouts=2,
-                                           copy=False)
-        solution = run_optknock(optknock_problem, tolerance_integer=1e-9)
-        assert "ACKr" in solution.knockouts
-        assert "ACALD" in solution.knockouts
-        assert abs(solution.f - 17.891) < 0.001
diff --git a/cobra/test/test_flux_analysis.py b/cobra/test/test_flux_analysis.py
index 36b6ca9..773d10b 100644
--- a/cobra/test/test_flux_analysis.py
+++ b/cobra/test/test_flux_analysis.py
@@ -1,36 +1,85 @@
-import pytest
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import re
 import sys
-from os import name
+import warnings
 from contextlib import contextmanager
-import re
-from six import iteritems, StringIO
-from cobra.core import Model, Reaction, Metabolite
-from cobra.solvers import solver_dict, get_solver_name
+from os import name
+
+import pytest
+import numpy
+from optlang.interface import OPTIMAL, INFEASIBLE
+from six import StringIO, iteritems
+
+import cobra.util.solver as sutil
+from cobra.core import Metabolite, Model, Reaction
 from cobra.flux_analysis import *
-from cobra.solvers import SolverNotFound
-from .conftest import model, large_model, solved_model, fva_results
+from cobra.flux_analysis.parsimonious import add_pfba
+from cobra.flux_analysis.sampling import ACHRSampler, OptGPSampler
+from cobra.flux_analysis.reaction import assess
+from cobra.manipulation import convert_to_irreversible
+from cobra.solvers import SolverNotFound, get_solver_name, solver_dict
+from cobra.exceptions import Infeasible
 
 try:
-    import numpy
+    import scipy
+    from cobra.flux_analysis.moma import add_moma
 except ImportError:
-    numpy = None
+    scipy = None
+    add_moma = None
 try:
     import matplotlib
-except ImportError:
+except (ImportError, RuntimeError):
     matplotlib = None
 try:
-    import pandas
-except ImportError:
-    pandas = None
-try:
-    import tabulate
-except ImportError:
-    tabulate = None
+    from matplotlib import pyplot
+    from mpl_toolkits.mplot3d import axes3d
+except (ImportError, RuntimeError):
+    pyplot = None
+    axes3d = None
+
+# The scipy interface is currently unstable and may yield errors or infeasible
+# solutions
+stable_optlang = ["glpk", "cplex", "gurobi"]
+optlang_solvers = ["optlang-" + s for s in stable_optlang if s in
+                   sutil.solvers]
+all_solvers = optlang_solvers + list(solver_dict)
+
+
+def construct_ll_test_model():
+    test_model = Model()
+    test_model.add_metabolites(Metabolite("A"))
+    test_model.add_metabolites(Metabolite("B"))
+    test_model.add_metabolites(Metabolite("C"))
+    EX_A = Reaction("EX_A")
+    EX_A.add_metabolites({test_model.metabolites.A: 1})
+    DM_C = Reaction("DM_C")
+    DM_C.add_metabolites({test_model.metabolites.C: -1})
+    v1 = Reaction("v1")
+    v1.add_metabolites({test_model.metabolites.A: -1,
+                        test_model.metabolites.B: 1})
+    v2 = Reaction("v2")
+    v2.add_metabolites({test_model.metabolites.B: -1,
+                        test_model.metabolites.C: 1})
+    v3 = Reaction("v3")
+    v3.add_metabolites({test_model.metabolites.C: -1,
+                        test_model.metabolites.A: 1})
+    test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
+    DM_C.objective_coefficient = 1
+    return test_model
+
+
+ at pytest.fixture(scope="function", params=optlang_solvers)
+def ll_test_model(request):
+    test_model = construct_ll_test_model()
+    test_model.solver = request.param
+    return test_model
 
 
 @contextmanager
 def captured_output():
-    """ A context manager to test the IO summary methods """
+    """A context manager to test the IO summary methods."""
     new_out, new_err = StringIO(), StringIO()
     old_out, old_err = sys.stdout, sys.stderr
     try:
@@ -41,122 +90,198 @@ def captured_output():
 
 
 class TestCobraFluxAnalysis:
-    """Test the simulation functions in cobra.flux_analysis"""
+    """Test the simulation functions in cobra.flux_analysis."""
 
-    @pytest.mark.parametrize("solver", list(solver_dict))
+    @pytest.mark.parametrize("solver", all_solvers)
     def test_pfba_benchmark(self, large_model, benchmark, solver):
-        benchmark(optimize_minimal_flux, large_model, solver=solver)
+        convert_to_irreversible(large_model)
 
-    @pytest.mark.parametrize("solver", list(solver_dict))
-    def test_pfba(self, model, solver):
-        optimize_minimal_flux(model, solver=solver)
-        abs_x = [abs(i) for i in model.solution.x]
-        assert model.solution.status == "optimal"
-        assert abs(model.solution.f - 0.8739) < 0.001
-        assert abs(sum(abs_x) - 518.4221) < 0.001
+        def do_pfba(solver):
+            pfba(large_model, solver=solver,
+                 already_irreversible=True)
 
-        # Test desired_objective_value
-        desired_objective = 0.8
-        optimize_minimal_flux(model, solver=solver,
-                              desired_objective_value=desired_objective)
-        abs_x = [abs(i) for i in model.solution.x]
-        assert model.solution.status == "optimal"
-        assert abs(model.solution.f - desired_objective) < 0.001
-        assert abs(sum(abs_x) - 476.1594) < 0.001
+        benchmark(do_pfba, solver)
 
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_pfba(self, model, solver):
+        with model:
+            add_pfba(model)
+            with pytest.raises(ValueError):
+                add_pfba(model)
+
+        if solver in optlang_solvers:
+            model.solver = solver
+        expression = model.objective.expression
+        n_constraints = len(model.constraints)
+        solution = pfba(model, solver=solver)
+        assert solution.status == "optimal"
+        assert numpy.isclose(solution.x_dict["Biomass_Ecoli_core"],
+                             0.8739, atol=1e-4, rtol=0.0)
+        abs_x = [abs(i) for i in solution.x]
+        assert numpy.isclose(sum(abs_x), 518.4221, atol=1e-4, rtol=0.0)
+        # test changes to model reverted
+        assert expression == model.objective.expression
+        assert len(model.constraints) == n_constraints
+
+        # needed?
+        # Test desired_objective_value
+        # desired_objective = 0.8
+        # pfba(model, solver=solver,
+        #                       desired_objective_value=desired_objective)
+        # abs_x = [abs(i) for i in model.solution.x]
+        # assert model.solution.status == "optimal"
+        # assert abs(model.solution.f - desired_objective) < 0.001
+        # assert abs(sum(abs_x) - 476.1594) < 0.001
+
+        # TODO: parametrize fraction (DRY it up)
         # Test fraction_of_optimum
-        optimize_minimal_flux(model, solver=solver,
-                              fraction_of_optimum=0.95)
-        abs_x = [abs(i) for i in model.solution.x]
-        assert model.solution.status == "optimal"
-        assert abs(model.solution.f - 0.95 * 0.8739) < 0.001
-        assert abs(sum(abs_x) - 493.4400) < 0.001
-
-        # Make sure the model works for non-unity objective values
-        model.reactions.Biomass_Ecoli_core.objective_coefficient = 2
-        optimize_minimal_flux(model, solver=solver)
-        assert abs(model.solution.f - 2 * 0.8739) < 0.001
-        model.reactions.Biomass_Ecoli_core.objective_coefficient = 1
-
-        # Test some erroneous inputs -- multiple objectives
-        model.reactions.ATPM.objective_coefficient = 1
-        with pytest.raises(ValueError):
-            optimize_minimal_flux(model, solver=solver)
-        model.reactions.ATPM.objective_coefficient = 0
-
-        # Minimization of objective
-        with pytest.raises(ValueError):
-            optimize_minimal_flux(model, solver=solver,
-                                  objective_sense='minimize')
+        solution = pfba(model, solver=solver,
+                        fraction_of_optimum=0.95)
+        assert solution.status == "optimal"
+        assert numpy.isclose(solution.x_dict["Biomass_Ecoli_core"],
+                             0.95 * 0.8739, atol=1e-4, rtol=0.0)
+        abs_x = [abs(i) for i in solution.x]
+        assert numpy.isclose(sum(abs_x), 493.4400, atol=1e-4, rtol=0.0)
 
         # Infeasible solution
-        atpm = float(model.reactions.ATPM.lower_bound)
         model.reactions.ATPM.lower_bound = 500
-        with pytest.raises(ValueError):
-            optimize_minimal_flux(model, solver=solver)
-        model.reactions.ATPM.lower_bound = atpm
-
-    def test_single_gene_deletion_fba_benchmark(self, large_model, benchmark):
-        genes = ['b0511', 'b2521', 'b0651', 'b2502', 'b3132', 'b1486', 'b3384',
-                 'b4321', 'b3428', 'b2789', 'b0052', 'b0115',
-                 'b2167', 'b0759', 'b3389', 'b4031', 'b3916', 'b2374', 'b0677',
-                 'b2202']
-        benchmark(single_gene_deletion, large_model, gene_list=genes)
-
-    def test_single_gene_deletion_fba(self, model):
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", UserWarning)
+            with pytest.raises((UserWarning, Infeasible, ValueError)):
+                pfba(model, solver=solver)
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_single_gene_deletion_fba_benchmark(self, model, benchmark,
+                                                solver):
+        benchmark(single_gene_deletion, model, solver=solver)
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_single_gene_deletion_fba(self, model, solver):
         # expected knockouts for textbook model
         growth_dict = {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
                        "b2276": 0.21, "b1779": 0.00}
-        rates, statuses = single_gene_deletion(model,
-                                               gene_list=growth_dict.keys(),
-                                               method="fba")
-        for gene, expected_value in iteritems(growth_dict):
-            assert statuses[gene] == 'optimal'
-            assert abs(rates[gene] - expected_value) < 0.01
-
-    def test_single_gene_deletion_moma_benchmark(self, large_model, benchmark):
+        df = single_gene_deletion(model, gene_list=growth_dict.keys(),
+                                  method="fba", solver=solver)
+        assert numpy.all([df.status == 'optimal'])
+        assert all(abs(df.flux[gene] - expected) < 0.01 for
+                   gene, expected in iteritems(growth_dict))
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
+    def test_single_gene_deletion_moma_benchmark(self, model, benchmark):
         try:
-            get_solver_name(qp=True)
-        except SolverNotFound:
+            sutil.get_solver_name(qp=True)
+        except sutil.SolverNotFound:
             pytest.skip("no qp support")
-        genes = ['b1764', 'b0463', 'b1779', 'b0417']
-        benchmark(single_gene_deletion, large_model, gene_list=genes,
-                  method="moma")
 
+        genes = ['b0008', 'b0114', 'b2276', 'b1779']
+        benchmark(single_gene_deletion, model, gene_list=genes, method="moma")
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
+    def test_single_deletion_linear_moma_benchmark(self, model, benchmark):
+        genes = ['b0008', 'b0114', 'b2276', 'b1779']
+        benchmark(single_gene_deletion, model, gene_list=genes,
+                  method="linear moma")
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
+    def test_moma_sanity(self, model):
+        """Test optimization criterion and optimality."""
+        try:
+            sutil.get_solver_name(qp=True)
+        except sutil.SolverNotFound:
+            pytest.skip("no qp support")
+
+        sol = model.optimize()
+        with model:
+            model.reactions.PFK.knock_out()
+            knock_sol = model.optimize()
+            ssq = (knock_sol.fluxes - sol.fluxes).pow(2).sum()
+
+        with model:
+            add_moma(model)
+            model.reactions.PFK.knock_out()
+            moma_sol = model.optimize()
+            moma_ssq = (moma_sol.fluxes - sol.fluxes).pow(2).sum()
+
+        assert numpy.allclose(moma_sol.objective_value, moma_ssq)
+        assert moma_ssq < ssq
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
     def test_single_gene_deletion_moma(self, model):
         try:
-            get_solver_name(qp=True)
-        except SolverNotFound:
+            sutil.get_solver_name(qp=True)
+        except sutil.SolverNotFound:
             pytest.skip("no qp support")
 
         # expected knockouts for textbook model
         growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
                        "b2276": 0.11, "b1779": 0.00}
 
-        rates, statuses = single_gene_deletion(model,
-                                               gene_list=growth_dict.keys(),
-                                               method="moma")
-        for gene, expected_value in iteritems(growth_dict):
-            assert statuses[gene] == 'optimal'
-            assert abs(rates[gene] - expected_value) < 0.01
-
-    def test_single_gene_deletion_benchmark(self, large_model, benchmark):
-        reactions = ['CDPMEK', 'PRATPP', 'HISTD', 'PPCDC']
-        benchmark(single_reaction_deletion, large_model,
-                  reaction_list=reactions)
-
-    def test_single_reaction_deletion(self, model):
+        df = single_gene_deletion(model, gene_list=growth_dict.keys(),
+                                  method="moma")
+        assert (df.status == 'optimal').all()
+        assert all(abs(df.flux[gene] - expected) < 0.01
+                   for gene, expected in iteritems(growth_dict))
+        with model:
+            add_moma(model)
+            with pytest.raises(ValueError):
+                add_moma(model)
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
+    def test_linear_moma_sanity(self, model):
+        """Test optimization criterion and optimality."""
+        sol = model.optimize()
+        with model:
+            model.reactions.PFK.knock_out()
+            knock_sol = model.optimize()
+            sabs = (knock_sol.fluxes - sol.fluxes).abs().sum()
+
+        with model:
+            add_moma(model, linear=True)
+            model.reactions.PFK.knock_out()
+            moma_sol = model.optimize()
+            moma_sabs = (moma_sol.fluxes - sol.fluxes).abs().sum()
+
+        assert numpy.allclose(moma_sol.objective_value, moma_sabs)
+        assert moma_sabs < sabs
+
+    @pytest.mark.skipif(scipy is None,
+                        reason="moma gene deletion requires scipy")
+    def test_single_gene_deletion_linear_moma(self, model):
+        # expected knockouts for textbook model
+        growth_dict = {"b0008": 0.87, "b0114": 0.76, "b0116": 0.65,
+                       "b2276": 0.08, "b1779": 0.00}
+
+        df = single_gene_deletion(model, gene_list=growth_dict.keys(),
+                                  method="linear moma")
+        assert numpy.all([df.status == 'optimal'])
+        assert all(abs(df.flux[gene] - expected) < 0.01
+                   for gene, expected in iteritems(growth_dict))
+        with model:
+            add_moma(model, linear=True)
+            with pytest.raises(ValueError):
+                add_moma(model)
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_single_gene_deletion_benchmark(self, model, benchmark,
+                                            solver):
+        benchmark(single_reaction_deletion, model, solver=solver)
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_single_reaction_deletion(self, model, solver):
         expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
                             'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
 
-        results, status = single_reaction_deletion(
-            model, reaction_list=expected_results.keys())
-        assert len(results) == 6
-        assert len(status) == 6
-        for status_value in status.values():
-            assert status_value == "optimal"
-        for reaction, value in results.items():
-            assert abs(value - expected_results[reaction]) < 0.00001
+        df = single_reaction_deletion(
+            model, reaction_list=expected_results.keys(), solver=solver)
+        assert len(df) == 6
+        assert numpy.all([df.status == 'optimal'])
+        assert all(abs(df.flux[gene] - expected) < 0.00001 for
+                   gene, expected in iteritems(expected_results))
 
     @classmethod
     def compare_matrices(cls, matrix1, matrix2, places=3):
@@ -168,13 +293,11 @@ class TestCobraFluxAnalysis:
             for j in range(ncols):
                 assert abs(matrix1[i][j] - matrix2[i][j]) < 10 ** -places
 
-    @pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
     def test_double_gene_deletion_benchmark(self, large_model, benchmark):
         genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
                  "b1241"]
         benchmark(double_gene_deletion, large_model, gene_list1=genes)
 
-    @pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
     def test_double_gene_deletion(self, model):
         genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
                  "b1241"]
@@ -200,7 +323,6 @@ class TestCobraFluxAnalysis:
         assert solution["y"] == genes
         self.compare_matrices(growth_list[:-1], solution["data"])
 
-    @pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
     def test_double_reaction_deletion(self, model):
         reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
         growth_list = [[0.704, 0.135, 0.000, 0.704],
@@ -215,127 +337,230 @@ class TestCobraFluxAnalysis:
         assert solution["y"] == reactions
         self.compare_matrices(growth_list, solution["data"])
 
-    @pytest.mark.parametrize("solver", list(solver_dict))
+    @pytest.mark.parametrize("solver", all_solvers)
     def test_flux_variability_benchmark(self, large_model, benchmark, solver):
         benchmark(flux_variability_analysis, large_model, solver=solver,
                   reaction_list=large_model.reactions[1::3])
 
-    @pytest.mark.parametrize("solver", list(solver_dict))
+    @pytest.mark.parametrize("solver", optlang_solvers)
+    def test_flux_variability_loopless_benchmark(self, model, benchmark,
+                                                 solver):
+        benchmark(flux_variability_analysis, model, loopless=True,
+                  solver=solver, reaction_list=model.reactions[1::3])
+
+    @pytest.mark.parametrize("solver", optlang_solvers)
+    def test_pfba_flux_variability(self, model, pfba_fva_results,
+                                   fva_results, solver):
+        with pytest.warns(UserWarning):
+            flux_variability_analysis(
+                model, pfba_factor=0.1, solver=solver,
+                reaction_list=model.reactions[1::3])
+        fva_out = flux_variability_analysis(
+            model, pfba_factor=1.1, solver=solver,
+            reaction_list=model.reactions)
+        for name, result in iteritems(fva_out.T):
+            for k, v in iteritems(result):
+                assert abs(pfba_fva_results[k][name] - v) < 0.00001
+                assert abs(pfba_fva_results[k][name]) <= abs(
+                    fva_results[k][name])
+        loop_reactions = [model.reactions.get_by_id(rid)
+                          for rid in ("FRD7", "SUCDi")]
+        fva_loopless = flux_variability_analysis(
+            model, solver=solver, pfba_factor=1.1,
+            reaction_list=loop_reactions, loopless=True)
+        assert numpy.allclose(fva_loopless["maximum"], fva_loopless["minimum"])
+
+    @pytest.mark.parametrize("solver", all_solvers)
     def test_flux_variability(self, model, fva_results, solver):
         if solver == "esolver":
             pytest.skip("esolver too slow...")
         fva_out = flux_variability_analysis(
-            model, solver=solver, reaction_list=model.reactions[1::3])
-        for name, result in iteritems(fva_out):
+            model, solver=solver, reaction_list=model.reactions)
+        for name, result in iteritems(fva_out.T):
             for k, v in iteritems(result):
-                assert abs(fva_results[name][k] - v) < 0.00001
+                assert abs(fva_results[k][name] - v) < 0.00001
+
+    @pytest.mark.parametrize("solver", optlang_solvers)
+    def test_flux_variability_loopless(self, model, solver):
+        loop_reactions = [model.reactions.get_by_id(rid)
+                          for rid in ("FRD7", "SUCDi")]
+        fva_normal = flux_variability_analysis(
+            model, solver=solver, reaction_list=loop_reactions)
+        fva_loopless = flux_variability_analysis(
+            model, solver=solver, reaction_list=loop_reactions, loopless=True)
+
+        assert not numpy.allclose(fva_normal["maximum"], fva_normal["minimum"])
+        assert numpy.allclose(fva_loopless["maximum"], fva_loopless["minimum"])
+
+    def test_fva_data_frame(self, model):
+        df = flux_variability_analysis(model, return_frame=True)
+        assert numpy.all([df.columns.values == ['maximum', 'minimum']])
 
     def test_fva_infeasible(self, model):
         infeasible_model = model.copy()
         infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
         # ensure that an infeasible model does not run FVA
-        with pytest.raises(ValueError):
+        with pytest.raises(Infeasible):
             flux_variability_analysis(infeasible_model)
 
-    def test_find_blocked_reactions(self, model):
+    def test_find_blocked_reactions_solver_none(self, model):
         result = find_blocked_reactions(model, model.reactions[40:46])
         assert result == ['FRUpts2']
 
-        result = find_blocked_reactions(model, model.reactions[42:48])
+    def test_essential_genes(self, model):
+        essential_genes = {'b2779', 'b1779', 'b0720', 'b2416',
+                           'b2926', 'b1136', 'b2415'}
+        observed_essential_genes = {g.id for g in find_essential_genes(model)}
+        assert observed_essential_genes == essential_genes
+
+    def test_essential_reactions(self, model):
+        essential_reactions = {'GLNS', 'Biomass_Ecoli_core', 'PIt2r', 'GAPD',
+                               'ACONTb', 'EX_nh4_e', 'ENO', 'EX_h_e',
+                               'EX_glc__D_e', 'ICDHyr', 'CS', 'NH4t', 'GLCpts',
+                               'PGM', 'EX_pi_e', 'PGK', 'RPI', 'ACONTa'}
+        observed_essential_reactions = {r.id for r in
+                                        find_essential_reactions(model)}
+        assert observed_essential_reactions == essential_reactions
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_find_blocked_reactions(self, model, solver):
+        result = find_blocked_reactions(model, model.reactions[40:46],
+                                        solver=solver)
+        assert result == ['FRUpts2']
+
+        result = find_blocked_reactions(model, model.reactions[42:48],
+                                        solver=solver)
         assert set(result) == {'FUMt2_2', 'FRUpts2'}
 
         result = find_blocked_reactions(model, model.reactions[30:50],
+                                        solver=solver,
                                         open_exchanges=True)
         assert result == []
 
-    @classmethod
-    def construct_ll_test_model(cls):
-        test_model = Model()
-        test_model.add_metabolites(Metabolite("A"))
-        test_model.add_metabolites(Metabolite("B"))
-        test_model.add_metabolites(Metabolite("C"))
-        EX_A = Reaction("EX_A")
-        EX_A.add_metabolites({test_model.metabolites.A: 1})
-        DM_C = Reaction("DM_C")
-        DM_C.add_metabolites({test_model.metabolites.C: -1})
-        v1 = Reaction("v1")
-        v1.add_metabolites({test_model.metabolites.A: -1,
-                            test_model.metabolites.B: 1})
-        v2 = Reaction("v2")
-        v2.add_metabolites({test_model.metabolites.B: -1,
-                            test_model.metabolites.C: 1})
-        v3 = Reaction("v3")
-        v3.add_metabolites({test_model.metabolites.C: -1,
-                            test_model.metabolites.A: 1})
-        DM_C.objective_coefficient = 1
-        test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
-        return test_model
-
-    def test_loopless_benchmark(self, benchmark):
-        test_model = self.construct_ll_test_model()
-        benchmark(lambda: construct_loopless_model(test_model).optimize())
-
-    def test_loopless(self):
-        try:
-            get_solver_name(mip=True)
-        except SolverNotFound:
-            pytest.skip("no MILP solver found")
-        test_model = self.construct_ll_test_model()
-        feasible_sol = construct_loopless_model(test_model).optimize()
-        test_model.reactions.get_by_id('v3').lower_bound = 1
-        infeasible_sol = construct_loopless_model(test_model).optimize()
-        assert feasible_sol.status == "optimal"
-        assert infeasible_sol.status == "infeasible"
+    def test_legacy_loopless_benchmark(self, benchmark):
+        test_model = construct_ll_test_model()
+        benchmark(lambda: construct_loopless_model(test_model).optimize(
+            solver="cglpk"))
+
+    def test_loopless_benchmark_before(self, benchmark):
+        test_model = construct_ll_test_model()
+
+        def _():
+            with test_model:
+                add_loopless(test_model)
+                test_model.optimize(solver="optlang-glpk")
+
+        benchmark(_)
+
+    def test_loopless_benchmark_after(self, benchmark):
+        test_model = construct_ll_test_model()
+        benchmark(loopless_solution, test_model)
 
-    def test_gapfilling(self):
+    def test_legacy_loopless(self):
         try:
             get_solver_name(mip=True)
         except SolverNotFound:
             pytest.skip("no MILP solver found")
+        test_model = construct_ll_test_model()
+        feasible_sol = construct_loopless_model(test_model).optimize(
+            solver="cglpk")
+        assert feasible_sol.status == "optimal"
+        test_model.reactions.v3.lower_bound = 1
+        infeasible_mod = construct_loopless_model(test_model)
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", UserWarning)
+            with pytest.raises(UserWarning):
+                infeasible_mod.optimize(solver="cglpk")
+
+    def test_loopless_solution(self, ll_test_model):
+        solution_feasible = loopless_solution(ll_test_model)
+        ll_test_model.reactions.v3.lower_bound = 1
+        ll_test_model.optimize()
+        solution_infeasible = loopless_solution(ll_test_model)
+        assert solution_feasible.fluxes["v3"] == 0.0
+        assert solution_infeasible.fluxes["v3"] == 1.0
+
+    def test_loopless_solution_fluxes(self, model):
+        fluxes = model.optimize().fluxes
+        ll_solution = loopless_solution(model, fluxes=fluxes)
+        assert len(ll_solution.fluxes) == len(model.reactions)
+        fluxes["Biomass_Ecoli_core"] = 1
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", UserWarning)
+            with pytest.raises(UserWarning):
+                loopless_solution(model, fluxes=fluxes)
+
+    def test_add_loopless(self, ll_test_model):
+        add_loopless(ll_test_model)
+        feasible_status = ll_test_model.optimize().status
+        ll_test_model.reactions.v3.lower_bound = 1
+        ll_test_model.slim_optimize()
+        infeasible_status = ll_test_model.solver.status
+        assert feasible_status == OPTIMAL
+        assert infeasible_status == INFEASIBLE
+
+    def test_gapfilling(self, salmonella):
         m = Model()
-        m.add_metabolites(map(Metabolite, ["a", "b", "c"]))
-        r = Reaction("EX_A")
-        m.add_reaction(r)
-        r.add_metabolites({m.metabolites.a: 1})
-        r = Reaction("r1")
-        m.add_reaction(r)
-        r.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
-        r = Reaction("DM_C")
-        m.add_reaction(r)
-        r.add_metabolites({m.metabolites.c: -1})
-        r.objective_coefficient = 1
-
-        U = Model()
-        r = Reaction("a2b")
-        U.add_reaction(r)
-        r.build_reaction_from_string("a --> b", verbose=False)
-        r = Reaction("a2d")
-        U.add_reaction(r)
-        r.build_reaction_from_string("a --> d", verbose=False)
-
-        # GrowMatch
-        result = gapfilling.growMatch(m, U)[0]
-        assert len(result) == 1
-        assert result[0].id == "a2b"
-        # SMILEY
-        result = gapfilling.SMILEY(m, "b", U)[0]
+        m.add_metabolites([Metabolite(m_id) for m_id in ["a", "b", "c"]])
+        exa = Reaction("EX_a")
+        exa.add_metabolites({m.metabolites.a: 1})
+        b2c = Reaction("b2c")
+        b2c.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
+        dmc = Reaction("DM_c")
+        dmc.add_metabolites({m.metabolites.c: -1})
+        m.add_reactions([exa, b2c, dmc])
+        m.objective = 'DM_c'
+
+        universal = Model()
+        a2b = Reaction("a2b")
+        a2d = Reaction("a2d")
+        universal.add_reactions([a2b, a2d])
+        a2b.build_reaction_from_string("a --> b", verbose=False)
+        a2d.build_reaction_from_string("a --> d", verbose=False)
+
+        # # GrowMatch
+        # result = gapfilling.growMatch(m, universal)[0]
+        result = gapfilling.gapfill(m, universal)[0]
         assert len(result) == 1
         assert result[0].id == "a2b"
 
-        # 2 rounds of GrowMatch with exchange reactions
-        result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
+        # # SMILEY
+        # result = gapfilling.SMILEY(m, "b", universal)[0]
+        with m:
+            m.objective = m.add_boundary(m.metabolites.b, type='demand')
+            result = gapfilling.gapfill(m, universal)[0]
+            assert len(result) == 1
+            assert result[0].id == "a2b"
+
+        # # 2 rounds of GrowMatch with exchange reactions
+        # result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
+        result = gapfilling.gapfill(m, None, exchange_reactions=True,
+                                    iterations=2)
         assert len(result) == 2
         assert len(result[0]) == 1
         assert len(result[1]) == 1
-        assert {i[0].id for i in result} == {"SMILEY_EX_b", "SMILEY_EX_c"}
+        assert {i[0].id for i in result} == {"EX_b", "EX_c"}
+
+        # somewhat bigger model
+        universal = Model("universal_reactions")
+        with salmonella as model:
+            for i in [i.id for i in model.metabolites.f6p_c.reactions]:
+                reaction = model.reactions.get_by_id(i)
+                universal.add_reactions([reaction.copy()])
+                model.remove_reactions([reaction])
+            gf = gapfilling.GapFiller(model, universal,
+                                      penalties={'TKT2': 1e3},
+                                      demand_reactions=False)
+            solution = gf.fill()
+            assert 'TKT2' not in {r.id for r in solution[0]}
+            assert gf.validate(solution[0])
 
-    @pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
     def test_phenotype_phase_plane_benchmark(self, model, benchmark):
         benchmark(calculate_phenotype_phase_plane,
                   model, "EX_glc__D_e", "EX_o2_e",
                   reaction1_npoints=20, reaction2_npoints=20)
 
-    @pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
     def test_phenotype_phase_plane(self, model):
         data = calculate_phenotype_phase_plane(
             model, "EX_glc__D_e", "EX_o2_e",
@@ -343,25 +568,64 @@ class TestCobraFluxAnalysis:
         assert data.growth_rates.shape == (20, 20)
         assert abs(data.growth_rates.max() - 1.20898) < 0.0001
         assert abs(data.growth_rates[0, :].max()) < 0.0001
-        if matplotlib is None:
-            pytest.skip("can't test plots without matplotlib")
+        if pyplot is None or axes3d is None:
+            pytest.skip("can't test plots without 3D plotting")
         data.plot()
 
-    def check_entries(self, out, desired_entries):
-        """ensure each entry in desired_entries appears in output"""
-        output = out.getvalue().strip()
-        output_set = set((re.sub('\s', '', l) for l in output.splitlines()))
-        for item in desired_entries:
-            assert re.sub('\s', '', item) in output_set
-
-    @pytest.mark.skipif((pandas is None) or (tabulate is None),
-                        reason="summary methods require pandas and tabulate")
-    def test_summary_methods(self, model, solved_model):
-        # Test model summary methods
-        with pytest.raises(Exception):
+    def check_line(self, output, expected_entries, pattern=re.compile(r"\s")):
+        """Ensure each expected entry is in the output."""
+        output_set = set(pattern.sub("", line) for line in output.splitlines())
+        for elem in expected_entries:
+            assert pattern.sub("", elem) in output_set
+
+    def check_in_line(self, output, expected_entries,
+                      pattern=re.compile(r"\s")):
+        """Ensure each expected entry is contained in the output."""
+        output_strip = [pattern.sub("", line) for line in output.splitlines()]
+        for elem in expected_entries:
+            assert any(
+                pattern.sub("", elem) in line for line in output_strip), \
+                "Not found: {} in:\n{}".format(pattern.sub("", elem),
+                                               "\n".join(output_strip))
+
+    def test_model_summary_previous_solution(self, model, opt_solver):
+        model.solver = opt_solver
+        solution = model.optimize()
+        rxn_test = model.exchanges[0]
+        met_test = list(rxn_test.metabolites.keys())[0].id
+
+        solution.fluxes[rxn_test.id] = 321
+
+        with captured_output() as (out, err):
+            model.summary(solution)
+        self.check_in_line(out.getvalue(), [met_test + '321'])
+
+    def test_model_summary(self, model, opt_solver):
+        model.solver = opt_solver
+        # test non-fva version (these should be fixed for textbook model)
+        expected_entries = [
+            'o2_e      21.8',
+            'glc__D_e  10',
+            'nh4_e      4.77',
+            'pi_e       3.21',
+            'h2o_e  29.2',
+            'co2_e  22.8',
+            'h_e    17.5',
+            'Biomass_Ecol...  0.874',
+        ]
+        # Need to use a different method here because
+        # there are multiple entries per line.
+        model.optimize()
+        with captured_output() as (out, err):
             model.summary()
+        self.check_in_line(out.getvalue(), expected_entries)
 
-        desired_entries = [
+    @pytest.mark.parametrize("fraction", [0.95])
+    def test_model_summary_with_fva(self, model, opt_solver, fraction):
+        if opt_solver == "optlang-gurobi":
+            pytest.xfail("FVA currently buggy")
+        # test non-fva version (these should be fixed for textbook model)
+        expected_entries = [
             'idFluxRangeidFluxRangeBiomass_Ecol...0.874',
             'o2_e       21.8   [19.9, 23.7]'
             'h2o_e       29.2  [25, 30.7]',
@@ -380,34 +644,29 @@ class TestCobraFluxAnalysis:
             'etoh_e       0    [0, 1.11]',
             'acald_e      0    [0, 1.27]',
         ]
-        for solver in solver_dict:
-            with captured_output() as (out, err):
-                solved_model.summary(fva=0.95, solver=solver)
-            self.check_entries(out, desired_entries)
-
-        # test non-fva version (these should be fixed for textbook model
-        desired_entries = [
-            'o2_e      21.8',
-            'glc__D_e  10',
-            'nh4_e      4.77',
-            'pi_e       3.21',
-            'h2o_e  29.2',
-            'co2_e  22.8',
-            'h_e    17.5',
-            'Biomass_Ecol...  0.874',
-        ]
         # Need to use a different method here because
         # there are multiple entries per line.
-        for solver in solver_dict:
-            with captured_output() as (out, err):
-                solved_model.summary()
-
-            s = out.getvalue()
-            for i in desired_entries:
-                assert i in s
-
-        # Test metabolite summary methods
-        desired_entries = [
+        model.solver = opt_solver
+        solution = model.optimize()
+        with captured_output() as (out, err):
+            model.summary(solution, fva=fraction)
+        self.check_in_line(out.getvalue(), expected_entries)
+
+    @pytest.mark.parametrize("met", ["q8_c"])
+    def test_metabolite_summary_previous_solution(
+            self, model, opt_solver, met):
+        model.solver = opt_solver
+        solution = pfba(model)
+        model.metabolites.get_by_id(met).summary(solution)
+
+    @pytest.mark.parametrize("met", ["q8_c"])
+    def test_metabolite_summary(self, model, opt_solver, met):
+        model.solver = opt_solver
+        model.optimize()
+        with captured_output() as (out, err):
+            model.metabolites.get_by_id(met).summary()
+
+        expected_entries = [
             'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
             '%       FLUX  RXN ID    REACTION',
             '100%   43.6   CYTBD     '
@@ -419,26 +678,193 @@ class TestCobraFluxAnalysis:
             '12%     5.06  SUCDi     q8_c + succ_c --> fum_c + q8h2_c',
         ]
 
-        for solver in solver_dict:
-            with captured_output() as (out, err):
-                solved_model.metabolites.q8_c.summary()
-            self.check_entries(out, desired_entries)
+        self.check_in_line(out.getvalue(), expected_entries)
+
+    @pytest.mark.parametrize("fraction, met", [(0.99, "fdp_c")])
+    def test_metabolite_summary_with_fva(self, model, opt_solver, fraction,
+                                         met):
+        if opt_solver in ("optlang-glpk", "optlang-cplex", "optlang-gurobi"):
+            pytest.xfail("FVA currently buggy")
 
-        desired_entries = [
+        model.solver = opt_solver
+        model.optimize()
+        with captured_output() as (out, err):
+            model.metabolites.get_by_id(met).summary(fva=fraction)
+
+        expected_entries = [
             'PRODUCING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
-            '----------------------------------------------------------',
             '%       FLUX  RANGE         RXN ID    REACTION',
             '100%    7.48  [6.17, 9.26]  PFK       '
             'atp_c + f6p_c --> adp_c + fdp_c + h_c',
             'CONSUMING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
-            '----------------------------------------------------------',
             '%       FLUX  RANGE         RXN ID    REACTION',
             '100%    7.48  [6.17, 8.92]  FBA       fdp_c <=> dhap_c + g3p_c',
             '0%      0     [0, 1.72]     FBP       '
             'fdp_c + h2o_c --> f6p_c + pi_c',
         ]
 
-        for solver in solver_dict:
-            with captured_output() as (out, err):
-                solved_model.metabolites.fdp_c.summary(fva=0.99, solver=solver)
-            self.check_entries(out, desired_entries)
+        self.check_line(out.getvalue(), expected_entries)
+
+
+class TestCobraFluxSampling:
+    """Tests and benchmark flux sampling."""
+
+    def test_single_achr(self, model):
+        s = sample(model, 10, method="achr")
+        assert s.shape == (10, len(model.reactions))
+
+    def test_single_optgp(self, model):
+        s = sample(model, 10, processes=1)
+        assert s.shape == (10, len(model.reactions))
+
+    def test_multi_optgp(self, model):
+        s = sample(model, 10, processes=2)
+        assert s.shape == (10, len(model.reactions))
+
+    def test_wrong_method(self, model):
+        with pytest.raises(ValueError):
+            sample(model, 1, method="schwupdiwupp")
+
+    def test_validate_wrong_sample(self, model):
+        s = self.achr.sample(10)
+        s["hello"] = 1
+        with pytest.raises(ValueError):
+            self.achr.validate(s)
+
+    def test_fixed_seed(self, model):
+        s = sample(model, 1, seed=42)
+        assert numpy.allclose(s.TPI[0], 8.9516392250671544)
+
+    def test_equality_constraint(self, model):
+        model.reactions.ACALD.bounds = (-1.5, -1.5)
+        s = sample(model, 10)
+        assert numpy.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
+        s = sample(model, 10, method="achr")
+        assert numpy.allclose(s.ACALD, -1.5, atol=1e-6, rtol=0)
+
+    def test_inequality_constraint(self, model):
+        co = model.problem.Constraint(
+            model.reactions.ACALD.flux_expression, lb=-0.5)
+        model.add_cons_vars(co)
+        s = sample(model, 10)
+        assert all(s.ACALD > -0.5 - 1e-6)
+        s = sample(model, 10, method="achr")
+        assert all(s.ACALD > -0.5 - 1e-6)
+
+    def setup_class(self):
+        from . import create_test_model
+        model = create_test_model("textbook")
+        achr = ACHRSampler(model, thinning=1)
+        assert ((achr.n_warmup > 0) and
+                (achr.n_warmup <= 2 * len(model.variables)))
+        assert all(achr.validate(achr.warmup) == "v")
+        self.achr = achr
+
+        optgp = OptGPSampler(model, processes=1, thinning=1)
+        assert ((optgp.n_warmup > 0) and
+                (optgp.n_warmup <= 2 * len(model.variables)))
+        assert all(optgp.validate(optgp.warmup) == "v")
+        self.optgp = optgp
+
+    def test_achr_init_benchmark(self, model, benchmark):
+        benchmark(lambda: ACHRSampler(model))
+
+    def test_optgp_init_benchmark(self, model, benchmark):
+        benchmark(lambda: OptGPSampler(model, processes=2))
+
+    def test_sampling(self):
+        s = self.achr.sample(10)
+        assert all(self.achr.validate(s) == "v")
+
+        s = self.optgp.sample(10)
+        assert all(self.optgp.validate(s) == "v")
+
+    def test_achr_sample_benchmark(self, benchmark):
+        benchmark(self.achr.sample, 1)
+
+    def test_optgp_sample_benchmark(self, benchmark):
+        benchmark(self.optgp.sample, 1)
+
+    def test_batch_sampling(self):
+        for b in self.achr.batch(5, 4):
+            assert all(self.achr.validate(b) == "v")
+
+        for b in self.optgp.batch(5, 4):
+            assert all(self.optgp.validate(b) == "v")
+
+    def test_variables_samples(self):
+        vnames = numpy.array([v.name for v in self.achr.model.variables])
+        s = self.achr.sample(10, fluxes=False)
+        assert s.shape == (10, self.achr.warmup.shape[1])
+        assert (s.columns == vnames).all()
+        assert (self.achr.validate(s) == "v").all()
+        s = self.optgp.sample(10, fluxes=False)
+        assert s.shape == (10, self.optgp.warmup.shape[1])
+        assert (s.columns == vnames).all()
+        assert (self.optgp.validate(s) == "v").all()
+
+    def test_inhomogeneous_sanity(self, model):
+        """Test whether inhomogeneous sampling gives approximately the same
+           standard deviation as a homogeneous version."""
+        model.reactions.ACALD.bounds = (-1.5, -1.5)
+        s_inhom = sample(model, 64)
+        model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
+        s_hom = sample(model, 64)
+        relative_diff = (s_inhom.std() + 1e-12)/(s_hom.std() + 1e-12)
+        assert 0.5 < relative_diff.abs().mean() < 2
+
+        model.reactions.ACALD.bounds = (-1.5, -1.5)
+        s_inhom = sample(model, 64, method="achr")
+        model.reactions.ACALD.bounds = (-1.5 - 1e-3, -1.5 + 1e-3)
+        s_hom = sample(model, 64, method="achr")
+        relative_diff = (s_inhom.std() + 1e-12)/(s_hom.std() + 1e-12)
+        assert 0.5 < relative_diff.abs().mean() < 2
+
+    def test_reproject(self):
+        s = self.optgp.sample(10, fluxes=False).as_matrix()
+        proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
+        assert all(self.optgp.validate(proj) == "v")
+        s = numpy.random.rand(10, self.optgp.warmup.shape[1])
+        proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
+        assert all(self.optgp.validate(proj) == "v")
+
+
+class TestProductionEnvelope:
+    """Test the production envelope."""
+
+    def test_envelope_one(self, model):
+        df = production_envelope(model, ["EX_o2_e"])
+        assert abs(sum(df.flux) - 9.342) < 0.001
+
+    def test_envelope_multi_reaction_objective(self, model):
+        obj = {model.reactions.EX_ac_e: 1,
+               model.reactions.EX_co2_e: 1}
+        with pytest.raises(ValueError):
+            production_envelope(model, "EX_o2_e", obj)
+
+    def test_envelope_two(self, model):
+        df = production_envelope(model, ["EX_glc__D_e", "EX_o2_e"],
+                                 objective="EX_ac_e")
+        assert abs(numpy.sum(df.carbon_yield) - 83.579) < 0.001
+        assert abs(numpy.sum(df.flux) - 1737.466) < 0.001
+        assert abs(numpy.sum(df.mass_yield) - 82.176) < 0.001
+
+
+class TestReactionUtils:
+    """Test the assess_ functions in reactions.py."""
+
+    @pytest.mark.parametrize("solver", all_solvers)
+    def test_assess(self, model, solver):
+        with model:
+            assert assess(model, model.reactions.GLCpts, solver=solver) is True
+            pyr = model.metabolites.pyr_c
+            a = Metabolite('a')
+            b = Metabolite('b')
+            model.add_metabolites([a, b])
+            pyr_a2b = Reaction('pyr_a2b')
+            pyr_a2b.add_metabolites({pyr: -1, a: -1, b: 1})
+            model.add_reactions([pyr_a2b])
+            res = assess(model, pyr_a2b, 0.01, solver=solver)
+            expected = {'precursors': {a: {'required': 0.01, 'produced': 0.0}},
+                        'products': {b: {'required': 0.01, 'capacity': 0.0}}}
+            assert res == expected
diff --git a/cobra/test/test_io.py b/cobra/test/test_io.py
index b2c7ad0..8c03df3 100644
--- a/cobra/test/test_io.py
+++ b/cobra/test/test_io.py
@@ -1,13 +1,19 @@
-from warnings import warn
-from tempfile import gettempdir
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from collections import namedtuple
+from functools import partial
 from os import unlink
 from os.path import join, split
-from six import iteritems
-from functools import partial
-from pickle import load, dump
+from pickle import dump, load
+from tempfile import gettempdir
+from warnings import warn
+
 import pytest
-from collections import namedtuple
+from six import iteritems
+
 from cobra import io
+
 from .conftest import data_directory
 
 
@@ -87,6 +93,8 @@ trials = [IOTrial('fbc2', 'mini.pickle', 'mini_fbc2.xml',
                       io.load_matlab_model, io.save_matlab_model, None)),
           IOTrial('json', 'mini.pickle', 'mini.json',
                   io.load_json_model, io.save_json_model, validate_json),
+          IOTrial('yaml', 'mini.pickle', 'mini.yml',
+                  io.load_yaml_model, io.save_yaml_model, None),
           IOTrial('pickle', 'mini.pickle', 'mini.pickle',
                   read_pickle, write_pickle, None),
           pytest.mark.skipif("not cPickle")(
@@ -97,6 +105,28 @@ trials = [IOTrial('fbc2', 'mini.pickle', 'mini_fbc2.xml',
 trial_names = [node.name for node in trials]
 
 
+@pytest.mark.skipif(scipy is not None, reason='scipy available')
+def raise_scipy_errors():
+    with pytest.raises(ImportError):
+        io.save_matlab_model(None, 'test')
+    with pytest.raises(ImportError):
+        io.load_matlab_model('test')
+
+
+@pytest.mark.skipif(libsbml is not None, reason='libsbml available')
+def raise_libsbml_errors():
+    with pytest.raises(ImportError):
+        io.read_sbml_model('test')
+    with pytest.raises(ImportError):
+        io.write_sbml_model(None, 'test')
+    with pytest.raises(ImportError):
+        io.load_matlab_model('test')
+    with pytest.raises(ImportError):
+        io.write_legacy_sbml(None, 'test')
+    with pytest.raises(ImportError):
+        io.read_legacy_sbml(None, 'test')
+
+
 @pytest.fixture(scope="module", params=trials, ids=trial_names)
 def io_trial(request, data_directory):
     with open(join(data_directory, request.param.reference_file),
@@ -106,7 +136,10 @@ def io_trial(request, data_directory):
                                                   request.param.test_file))
     test_output_filename = join(gettempdir(),
                                 split(request.param.test_file)[-1])
-    request.param.write_function(test_model, test_output_filename)
+    # test writing the model within a context with a non-empty stack
+    with test_model:
+        test_model.objective = test_model.objective
+        request.param.write_function(test_model, test_output_filename)
     reread_model = request.param.read_function(test_output_filename)
     unlink(test_output_filename)
     return request.param.name, reference_model, test_model, reread_model
@@ -117,6 +150,7 @@ class TestCobraIO:
     def compare_models(cls, name, model1, model2):
         assert len(model1.reactions) == len(model2.reactions)
         assert len(model1.metabolites) == len(model2.metabolites)
+        assert model1.objective.direction == model2.objective.direction
         for attr in ("id", "name", "lower_bound", "upper_bound",
                      "objective_coefficient", "gene_reaction_rule"):
             assert getattr(model1.reactions[0], attr) == getattr(
@@ -140,9 +174,9 @@ class TestCobraIO:
             model2.reactions[-1].metabolites)
         assert len(model1.genes) == len(model2.genes)
         # ensure they have the same solution max
-        model1.optimize()
-        model2.optimize()
-        assert abs(model1.solution.f - model2.solution.f) < 0.001
+        solution1 = model1.optimize()
+        solution2 = model2.optimize()
+        assert abs(solution1.f - solution2.f) < 0.001
         # ensure the references are correct
         assert model2.metabolites[0]._model is model2
         assert model2.reactions[0]._model is model2
diff --git a/cobra/test/test_manipulation.py b/cobra/test/test_manipulation.py
index b77963a..40b98d5 100644
--- a/cobra/test/test_manipulation.py
+++ b/cobra/test/test_manipulation.py
@@ -1,6 +1,11 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import pytest
+
 from cobra.core import Metabolite, Model, Reaction
 from cobra.manipulation import *
-import pytest
+
 from .conftest import model, salmonella
 
 
@@ -8,53 +13,59 @@ class TestManipulation:
     """Test functions in cobra.manipulation"""
 
     def test_canonical_form(self, model):
+        solver = 'cglpk'
         # add G constraint to test
         g_constr = Metabolite("SUCCt2_2__test_G_constraint")
         g_constr._constraint_sense = "G"
         g_constr._bound = 5.0
         model.reactions.get_by_id("SUCCt2_2").add_metabolites({g_constr: 1})
-        assert abs(model.optimize("maximize").f - 0.855) < 0.001
+        assert abs(model.optimize("maximize", solver=solver).f - 0.855) < 0.001
         # convert to canonical form
         model = canonical_form(model)
-        assert abs(model.optimize("maximize").f - 0.855) < 10 ** -3
+        assert abs(
+            model.optimize("maximize", solver=solver).f - 0.855) < 10 ** -3
 
     def test_canonical_form_minimize(self, model):
+        solver = 'cglpk'
         # make a minimization problem
         model.reactions.get_by_id("Biomass_Ecoli_core").lower_bound = 0.5
         for reaction in model.reactions:
             reaction.objective_coefficient = reaction.id == "GAPD"
-        assert abs(model.optimize("minimize").f - 6.27) < 10 ** -3
+        assert abs(
+            model.optimize("minimize", solver=solver).f - 6.27) < 10 ** -3
         # convert to canonical form. Convert minimize to maximize
         model = canonical_form(model, objective_sense="minimize")
-        assert abs(model.optimize("maximize").f + 6.27) < 10 ** -3
+        assert abs(
+            model.optimize("maximize", solver=solver).f + 6.27) < 10 ** -3
         # lower bounds should now be <= constraints
         assert model.reactions.get_by_id(
             "Biomass_Ecoli_core").lower_bound == 0.0
 
     def test_modify_reversible(self, model):
+        solver = 'cglpk'
         model1 = model.copy()
-        model1.optimize()
+        solution1 = model1.optimize(solver=solver)
         model2 = model.copy()
         convert_to_irreversible(model2)
-        model2.optimize()
-        assert abs(model1.solution.f - model2.solution.f) < 10 ** -3
+        solution2 = model2.optimize(solver=solver)
+        assert abs(solution1.f - solution2.f) < 10 ** -3
         revert_to_reversible(model2)
-        model2.optimize()
-        assert abs(model1.solution.f - model2.solution.f) < 10 ** -3
+        solution2_rev = model2.optimize(solver=solver)
+        assert abs(solution1.f - solution2_rev.f) < 10 ** -3
         # Ensure revert_to_reversible is robust to solutions generated both
         # before and after reversibility conversion, or not solved at all.
         model3 = model.copy()
-        model3.optimize()
+        solution3 = model3.optimize(solver=solver)
         convert_to_irreversible(model3)
         revert_to_reversible(model3)
-        assert abs(model1.solution.f - model3.solution.f) < 10 ** -3
+        assert abs(solution1.f - solution3.f) < 10 ** -3
         # test reaction where both bounds are negative
         model4 = model.copy()
         glc = model4.reactions.get_by_id("EX_glc__D_e")
         glc.upper_bound = -1
         convert_to_irreversible(model4)
-        model4.optimize()
-        assert abs(model1.solution.f - model4.solution.f) < 10 ** -3
+        solution4 = model4.optimize(solver=solver)
+        assert abs(solution1.f - solution4.f) < 10 ** -3
         glc_rev = model4.reactions.get_by_id(glc.notes["reflection"])
         assert glc_rev.lower_bound == 1
         assert glc.upper_bound == 0
@@ -213,14 +224,6 @@ class TestManipulation:
         assert rxns.DM_h_c.annotation["SBO"] == "SBO:0000628"
         assert rxns.EX_h_e.annotation["SBO"] == "SBO:0000628"
 
-    def test_validate_reaction_bounds(self, model):
-        model.reactions[0].lower_bound = float("-inf")
-        model.reactions[1].lower_bound = float("nan")
-        model.reactions[0].upper_bound = float("inf")
-        model.reactions[1].upper_bound = float("nan")
-        errors = check_reaction_bounds(model)
-        assert len(errors) == 4
-
     def test_validate_formula_compartment(self, model):
         model.metabolites[1].compartment = "fake"
         model.metabolites[1].formula = "(a*.bcde)"
@@ -232,7 +235,7 @@ class TestManipulation:
         # if we remove the SBO term which marks the reaction as
         # mass balanced, then the reaction should be detected as
         # no longer mass balanced
-        EX_rxn = model.reactions.query("EX")[0]
+        EX_rxn = model.reactions.query(lambda r: r.boundary)[0]
         EX_rxn.annotation.pop("SBO")
         balance = check_mass_balance(model)
         assert len(balance) == 1
@@ -240,7 +243,7 @@ class TestManipulation:
         m1 = Metabolite('m1', formula='()')
         r1 = Reaction('r1')
         r1.add_metabolites({m1: 1})
-        with pytest.raises(ValueError):
+        with pytest.raises(ValueError), pytest.warns(UserWarning):
             r1.check_mass_balance()
 
     def test_prune_unused(self, model):
diff --git a/cobra/test/test_model.py b/cobra/test/test_model.py
index 56d8904..745d5d7 100644
--- a/cobra/test/test_model.py
+++ b/cobra/test/test_model.py
@@ -1,9 +1,23 @@
-from copy import deepcopy
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
 import warnings
+from copy import deepcopy
+
+import numpy
+from math import isnan
 import pytest
-from cobra.core import Model, Metabolite, Reaction
+import pandas as pd
+from sympy import S
+
+import cobra.util.solver as su
+from cobra.core import Metabolite, Model, Reaction
 from cobra.solvers import solver_dict
-from .conftest import model, array_model
+from cobra.util import create_stoichiometric_matrix
+from cobra.exceptions import OptimizationError
+
+stable_optlang = ["glpk", "cplex", "gurobi"]
+optlang_solvers = ["optlang-" + s for s in stable_optlang if s in su.solvers]
 
 try:
     import scipy
@@ -20,7 +34,7 @@ class TestReactions:
         assert reaction.gene_reaction_rule == "(g1 or g2) and g3"
         assert len(reaction.genes) == 3
         # adding reaction with a GPR propagates to the model
-        model.add_reaction(reaction)
+        model.add_reactions([reaction])
         assert len(model.genes) == 3
         # ensure the gene objects are the same in the model and reaction
         reaction_gene = list(reaction.genes)[0]
@@ -61,6 +75,30 @@ class TestReactions:
         fake_gene.name = "foo_gene"
         assert reaction.gene_name_reaction_rule == fake_gene.name
 
+    def test_gene_knock_out(self, model):
+        rxn = Reaction('rxn')
+        rxn.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
+        rxn.gene_reaction_rule = 'A2B1 or A2B2 and A2B3'
+        assert hasattr(list(rxn.genes)[0], 'knock_out')
+        model.add_reaction(rxn)
+        with model:
+            model.genes.A2B1.knock_out()
+            assert not model.genes.A2B1.functional
+            model.genes.A2B3.knock_out()
+            assert not rxn.functional
+        assert model.genes.A2B3.functional
+        assert rxn.functional
+        model.genes.A2B1.knock_out()
+        assert not model.genes.A2B1.functional
+        assert model.reactions.rxn.functional
+        model.genes.A2B3.knock_out()
+        assert not model.reactions.rxn.functional
+
+    def test_str(self):
+        rxn = Reaction('rxn')
+        rxn.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
+        assert str(rxn) == 'rxn: A --> B'
+
     @pytest.mark.parametrize("solver", list(solver_dict))
     def test_add_metabolite_benchmark(self, model, benchmark, solver):
         reaction = model.reactions.get_by_id("PGI")
@@ -72,29 +110,58 @@ class TestReactions:
                 solver_dict[solver].create_problem(model)
             for m, c in many_metabolites.items():
                 try:
-                    reaction.pop(m.id)
+                    reaction.subtract_metabolites(
+                        {m: reaction.get_coefficient(m)})
                 except KeyError:
                     pass
+
         benchmark(add_remove_metabolite)
 
     def test_add_metabolite(self, model):
-        reaction = model.reactions.get_by_id("PGI")
-        reaction.add_metabolites({model.metabolites[0]: 1})
-        assert model.metabolites[0] in reaction._metabolites
-        fake_metabolite = Metabolite("fake")
-        reaction.add_metabolites({fake_metabolite: 1})
-        assert fake_metabolite in reaction._metabolites
-        assert model.metabolites.has_id("fake")
-        assert model.metabolites.get_by_id("fake") is fake_metabolite
+        with pytest.raises(ValueError):
+            model.add_metabolites(Metabolite())
+        with model:
+            with model:
+                reaction = model.reactions.get_by_id("PGI")
+                reaction.add_metabolites({model.metabolites[0]: 1})
+                assert model.metabolites[0] in reaction._metabolites
+                fake_metabolite = Metabolite("fake")
+                reaction.add_metabolites({fake_metabolite: 1})
+                assert fake_metabolite in reaction._metabolites
+                assert model.metabolites.has_id("fake")
+                assert model.metabolites.get_by_id("fake") is fake_metabolite
+            assert len(model._contexts[0]._history) == 0
+
+        assert fake_metabolite._model is None
+        assert fake_metabolite not in reaction._metabolites
+        assert "fake" not in model.metabolites
 
         # test adding by string
-        reaction.add_metabolites({"g6p_c": -1})  # already in reaction
+        with model:
+            reaction.add_metabolites({"g6p_c": -1})  # already in reaction
+            assert reaction._metabolites[
+                       model.metabolites.get_by_id("g6p_c")] == -2
+            reaction.add_metabolites({"h_c": 1})
+            assert reaction._metabolites[
+                       model.metabolites.get_by_id("h_c")] == 1
+            with pytest.raises(KeyError):
+                reaction.add_metabolites({"missing": 1})
+
         assert reaction._metabolites[
-                   model.metabolites.get_by_id("g6p_c")] == -2
-        reaction.add_metabolites({"h_c": 1})
-        assert reaction._metabolites[model.metabolites.get_by_id("h_c")] == 1
-        with pytest.raises(KeyError):
-            reaction.add_metabolites({"missing": 1})
+                   model.metabolites.get_by_id("g6p_c")] == -1
+        assert model.metabolites.h_c not in reaction._metabolites
+
+        # Test combine=False
+        reaction = model.reactions.get_by_id("ATPM")
+        old_stoich = reaction._metabolites[
+            model.metabolites.get_by_id("h2o_c")]
+        with model:
+            reaction.add_metabolites({'h2o_c': 2.5}, combine=False)
+            assert reaction._metabolites[
+                       model.metabolites.get_by_id("h2o_c")] == 2.5
+
+        assert reaction._metabolites[
+                   model.metabolites.get_by_id("h2o_c")] == old_stoich
 
         # test adding to a new Reaction
         reaction = Reaction("test")
@@ -127,8 +194,15 @@ class TestReactions:
     def test_build_from_string(self, model):
         m = len(model.metabolites)
         pgi = model.reactions.get_by_id("PGI")
-        pgi.reaction = "g6p_c --> f6p_c"
-        assert pgi.lower_bound == 0
+
+        old_bounds = pgi.bounds
+
+        with model:
+            pgi.reaction = "g6p_c --> f6p_c"
+            assert pgi.lower_bound == 0
+
+        assert pgi.bounds == old_bounds
+
         pgi.bounds = (0, 1000)
         assert pgi.bounds == (0, 1000)
         assert not pgi.reversibility
@@ -137,11 +211,25 @@ class TestReactions:
         assert pgi.reaction.strip() == "g6p_c <-- f6p_c"
         pgi.reaction = "g6p_c --> f6p_c + h2o_c"
         assert model.metabolites.h2o_c, pgi._metabolites
-        pgi.build_reaction_from_string("g6p_c --> f6p_c + foo", verbose=False)
-        assert model.metabolites.h2o_c not in pgi._metabolites
-        assert "foo" in model.metabolites
-        assert model.metabolites.foo in pgi._metabolites
-        assert len(model.metabolites) == m + 1
+
+        with model:
+            pgi.build_reaction_from_string("g6p_c --> f6p_c + foo",
+                                           verbose=False)
+            assert model.metabolites.h2o_c not in pgi._metabolites
+            assert "foo" in model.metabolites
+            assert model.metabolites.foo in pgi._metabolites
+            assert len(model.metabolites) == m + 1
+
+        assert model.metabolites.h2o_c in pgi._metabolites
+        assert "foo" not in model.metabolites
+        with pytest.raises(AttributeError):
+            assert model.metabolites.foo
+        assert len(model.metabolites) == m
+
+    def test_bounds_setter(self, model):
+        rxn = model.reactions.get_by_id("PGI")
+        with pytest.raises(AssertionError):
+            rxn.bounds = (1, 0)
 
     def test_copy(self, model):
         PGI = model.reactions.PGI
@@ -208,6 +296,9 @@ class TestReactions:
         assert new._model is not model
         assert len(new.metabolites) == 3
 
+    def test_repr_html_(self, model):
+        assert '<table>' in model.reactions[0]._repr_html_()
+
 
 class TestCobraMetabolites:
     def test_metabolite_formula(self):
@@ -225,6 +316,14 @@ class TestCobraMetabolites:
         met.elements = orig_elements
         assert met.formula == orig_formula
 
+    def test_repr_html_(self, model):
+        assert '<table>' in model.metabolites.h2o_c._repr_html_()
+
+
+class TestCobraGenes:
+    def test_repr_html_(self, model):
+        assert '<table>' in model.genes[0]._repr_html_()
+
 
 class TestCobraModel:
     """test core cobra functions"""
@@ -245,9 +344,58 @@ class TestCobraModel:
             model.add_reaction(dummy_reaction)
             if not getattr(model, 'solver', None):
                 solver_dict[solver].create_problem(model)
-            model.remove_reactions([dummy_reaction], delete=False)
+            model.remove_reactions([dummy_reaction])
+
         benchmark(benchmark_add_reaction)
 
+    def test_add_metabolite(self, model):
+        new_metabolite = Metabolite('test_met')
+        assert new_metabolite not in model.metabolites
+        with model:
+            model.add_metabolites(new_metabolite)
+            assert new_metabolite._model == model
+            assert new_metabolite in model.metabolites
+            assert new_metabolite.id in model.solver.constraints
+
+        assert new_metabolite._model is None
+        assert new_metabolite not in model.metabolites
+        assert new_metabolite.id not in model.solver.constraints
+
+    def test_remove_metabolite_subtractive(self, model):
+        test_metabolite = model.metabolites[4]
+        test_reactions = test_metabolite.reactions
+        with model:
+            model.remove_metabolites(test_metabolite, destructive=False)
+            assert test_metabolite._model is None
+            assert test_metabolite not in model.metabolites
+            assert test_metabolite.id not in model.solver.constraints
+            for reaction in test_reactions:
+                assert reaction in model.reactions
+
+        assert test_metabolite._model is model
+        assert test_metabolite in model.metabolites
+        assert test_metabolite.id in model.solver.constraints
+
+    def test_remove_metabolite_destructive(self, model):
+        test_metabolite = model.metabolites[4]
+        test_reactions = test_metabolite.reactions
+        with model:
+            model.remove_metabolites(test_metabolite, destructive=True)
+            assert test_metabolite._model is None
+            assert test_metabolite not in model.metabolites
+            assert test_metabolite.id not in model.solver.constraints
+            for reaction in test_reactions:
+                assert reaction not in model.reactions
+
+        assert test_metabolite._model is model
+        assert test_metabolite in model.metabolites
+        assert test_metabolite.id in model.solver.constraints
+        for reaction in test_reactions:
+            assert reaction in model.reactions
+
+    def test_compartments(self, model):
+        assert set(model.compartments) == {"c", "e"}
+
     def test_add_reaction(self, model):
         old_reaction_count = len(model.reactions)
         old_metabolite_count = len(model.metabolites)
@@ -289,6 +437,36 @@ class TestCobraModel:
         r2.add_metabolites({Metabolite(model.metabolites[0].id): 1})
         assert model.metabolites[0] is list(r2._metabolites)[0]
 
+    def test_add_reaction_context(self, model):
+        old_reaction_count = len(model.reactions)
+        old_metabolite_count = len(model.metabolites)
+        dummy_metabolite_1 = Metabolite("test_foo_1")
+        dummy_metabolite_2 = Metabolite("test_foo_2")
+        actual_metabolite = model.metabolites[0]
+        copy_metabolite = model.metabolites[1].copy()
+        dummy_reaction = Reaction("test_foo_reaction")
+        dummy_reaction.add_metabolites({dummy_metabolite_1: -1,
+                                        dummy_metabolite_2: 1,
+                                        copy_metabolite: -2,
+                                        actual_metabolite: 1})
+        dummy_reaction.gene_reaction_rule = 'dummy_gene'
+
+        with model:
+            model.add_reaction(dummy_reaction)
+            assert model.reactions.get_by_id(
+                dummy_reaction.id) == dummy_reaction
+            assert len(model.reactions) == old_reaction_count + 1
+            assert len(model.metabolites) == old_metabolite_count + 2
+            assert dummy_metabolite_1._model == model
+            assert 'dummy_gene' in model.genes
+
+        assert len(model.reactions) == old_reaction_count
+        assert len(model.metabolites) == old_metabolite_count
+        with pytest.raises(KeyError):
+            model.reactions.get_by_id(dummy_reaction.id)
+        assert dummy_metabolite_1._model is None
+        assert 'dummy_gene' not in model.genes
+
     def test_add_reaction_from_other_model(self, model):
         other = model.copy()
         for i in other.reactions:
@@ -304,12 +482,18 @@ class TestCobraModel:
 
     def test_model_remove_reaction(self, model):
         old_reaction_count = len(model.reactions)
-        model.remove_reactions(["PGI"])
-        assert len(model.reactions) == old_reaction_count - 1
-        with pytest.raises(KeyError):
-            model.reactions.get_by_id("PGI")
-        model.remove_reactions(model.reactions[:1])
-        assert len(model.reactions) == old_reaction_count - 2
+
+        with model:
+            model.remove_reactions(["PGI"])
+            assert len(model.reactions) == old_reaction_count - 1
+            with pytest.raises(KeyError):
+                model.reactions.get_by_id("PGI")
+            model.remove_reactions(model.reactions[:1])
+            assert len(model.reactions) == old_reaction_count - 2
+
+        assert len(model.reactions) == old_reaction_count
+        assert "PGI" in model.reactions
+
         tmp_metabolite = Metabolite("testing")
         model.reactions[0].add_metabolites({tmp_metabolite: 1})
         assert tmp_metabolite in model.metabolites
@@ -317,6 +501,11 @@ class TestCobraModel:
                                remove_orphans=True)
         assert tmp_metabolite not in model.metabolites
 
+        with model:
+            model.reactions[0].add_metabolites({tmp_metabolite: 1})
+            assert tmp_metabolite in model.metabolites
+        assert tmp_metabolite not in model.metabolites
+
     def test_reaction_remove(self, model):
         old_reaction_count = len(model.reactions)
         tmp_metabolite = Metabolite("testing")
@@ -361,7 +550,8 @@ class TestCobraModel:
         # Delete without removing orphan
         model.reactions[0].add_metabolites({tmp_metabolite: 1})
         assert len(tmp_metabolite.reactions) == 1
-        model.reactions[0].delete(remove_orphans=False)
+        with pytest.warns(DeprecationWarning):
+            model.reactions[0].delete(remove_orphans=False)
         # make sure it's still in the model
         assert tmp_metabolite in model.metabolites
         assert len(tmp_metabolite.reactions) == 0
@@ -397,12 +587,62 @@ class TestCobraModel:
         for reaction in gene_reactions:
             assert target_gene not in reaction.genes
 
+    def test_exchange_reactions(self, model):
+        assert set(model.exchanges) == set([rxn for rxn in model.reactions
+                                            if rxn.id.startswith("EX")])
+
+    @pytest.mark.parametrize("metabolites, reaction_type, prefix", [
+        ("exchange", "exchange", "EX_"),
+        ("demand", "demand", "DM_"),
+        ("sink", "sink", "SK_")
+    ], indirect=["metabolites"])
+    def test_add_boundary(self, model, metabolites, reaction_type, prefix):
+        for metabolite in metabolites:
+            reaction = model.add_boundary(metabolite, reaction_type)
+            assert model.reactions.get_by_id(
+                reaction.id) == reaction
+            assert reaction.reactants == [metabolite]
+            assert model.constraints[metabolite.id].expression.has(
+                model.variables[prefix + metabolite.id])
+
+    @pytest.mark.parametrize("metabolites, reaction_type, prefix", [
+        ("exchange", "exchange", "EX_"),
+        ("demand", "demand", "DM_"),
+        ("sink", "sink", "SK_")
+    ], indirect=["metabolites"])
+    def test_add_boundary_context(self, model, metabolites, reaction_type,
+                                  prefix):
+        with model:
+            for metabolite in metabolites:
+                reaction = model.add_boundary(metabolite, reaction_type)
+                assert model.reactions.get_by_id(
+                    reaction.id) == reaction
+                assert reaction.reactants == [metabolite]
+                assert -model.constraints[
+                    metabolite.id].expression.has(
+                    model.variables[prefix + metabolite.id])
+        for metabolite in metabolites:
+            assert prefix + metabolite.id not in model.reactions
+            assert prefix + metabolite.id not in model.variables.keys()
+
+    @pytest.mark.parametrize("metabolites, reaction_type", [
+        ("exchange", "exchange"),
+        ("demand", "demand"),
+        ("sink", "sink")
+    ], indirect=["metabolites"])
+    def test_add_existing_boundary(self, model, metabolites, reaction_type):
+        for metabolite in metabolites:
+            model.add_boundary(metabolite, reaction_type)
+            with pytest.raises(ValueError):
+                model.add_boundary(metabolite, reaction_type)
+
     @pytest.mark.parametrize("solver", list(solver_dict))
     def test_copy_benchmark(self, model, solver, benchmark):
         def _():
             model.copy()
             if not getattr(model, 'solver', None):
                 solver_dict[solver].create_problem(model)
+
         benchmark(_)
 
     @pytest.mark.parametrize("solver", list(solver_dict))
@@ -411,6 +651,7 @@ class TestCobraModel:
             large_model.copy()
             if not getattr(large_model, 'solver', None):
                 solver_dict[solver].create_problem(large_model)
+
         benchmark(_)
 
     def test_copy(self, model):
@@ -426,6 +667,12 @@ class TestCobraModel:
         model_copy.remove_reactions(model_copy.reactions[0:5])
         assert old_reaction_count == len(model.reactions)
         assert len(model.reactions) != len(model_copy.reactions)
+        # copying a model should not copy its context
+        with model:
+            model.remove_reactions([model.reactions.ACALD])
+            cp_model = model.copy()
+            assert len(cp_model._contexts) == 0
+        assert 'ACALD' not in cp_model.reactions
 
     def test_deepcopy_benchmark(self, model, benchmark):
         benchmark(deepcopy, model)
@@ -465,6 +712,49 @@ class TestCobraModel:
         # 'check not dangling metabolites when running Model.add_reactions
         assert len(orphan_metabolites) == 0
 
+    def test_merge_models(self, model, tiny_toy_model):
+        with model, tiny_toy_model:
+            # add some cons/vars to tiny_toy_model for testing merging
+            tiny_toy_model.add_reactions([Reaction('EX_glc__D_e')])
+            variable = tiny_toy_model.problem.Variable('foo')
+            constraint = tiny_toy_model.problem.Constraint(
+                variable, ub=0, lb=0, name='constraint')
+            tiny_toy_model.add_cons_vars([variable, constraint])
+
+            # test merging to new model
+            merged = model.merge(tiny_toy_model, inplace=False,
+                                 objective='sum', prefix_existing='tiny_')
+            assert 'ex1' in merged.reactions
+            assert 'ex1' not in model.reactions
+            assert merged.reactions.ex1.objective_coefficient == 1
+            assert merged.reactions.get_by_id(
+                'Biomass_Ecoli_core').objective_coefficient == 1
+            assert 'tiny_EX_glc__D_e' in merged.reactions
+            assert 'foo' in merged.variables
+
+            # test reversible in-place model merging
+            with model:
+                model.merge(tiny_toy_model, inplace=True, objective='left',
+                            prefix_existing='tiny_')
+                assert 'ex1' in model.reactions
+                assert 'constraint' in model.constraints
+                assert 'foo' in model.variables
+                assert 'tiny_EX_glc__D_e' in model.reactions
+                assert model.objective.expression == model.reactions.get_by_id(
+                    'Biomass_Ecoli_core').flux_expression
+            assert 'ex1' not in model.reactions
+            assert 'constraint' not in model.constraints
+            assert 'foo' not in model.variables
+            assert 'tiny_EX_glc__D_e' not in model.reactions
+
+        # test the deprecated operator overloading
+        with model:
+            merged = model + tiny_toy_model
+            assert 'ex1' in merged.reactions
+        with model:
+            model += tiny_toy_model
+            assert 'ex1' in model.reactions
+
     @pytest.mark.parametrize("solver", list(solver_dict))
     def test_change_objective_benchmark(self, model, benchmark, solver):
         atpm = model.reactions.get_by_id("ATPM")
@@ -473,15 +763,46 @@ class TestCobraModel:
             model.objective = atpm.id
             if not getattr(model, 'solver', None):
                 solver_dict[solver].create_problem(model)
+
         benchmark(benchmark_change_objective)
 
+    def test_slim_optimize(self, model):
+        with model:
+            assert model.slim_optimize() > 0.872
+            model.reactions.Biomass_Ecoli_core.lower_bound = 10
+            assert isnan(model.slim_optimize())
+            with pytest.raises(OptimizationError):
+                model.slim_optimize(error_value=None)
+
+    @pytest.mark.parametrize("solver", optlang_solvers)
+    def test_optimize(self, model, solver):
+        model.solver = solver
+        with model:
+            assert model.optimize().objective_value > 0.872
+            model.reactions.Biomass_Ecoli_core.lower_bound = 10
+            with pytest.warns(UserWarning):
+                model.optimize()
+            with pytest.raises(OptimizationError):
+                model.optimize(raise_error=True)
+
     def test_change_objective(self, model):
+        # Test for correct optimization behavior
+        model.optimize()
+        assert model.reactions.Biomass_Ecoli_core.x > 0.5
+        with model:
+            model.objective = model.reactions.EX_etoh_e
+            model.optimize()
+        assert model.reactions.Biomass_Ecoli_core.x < 0.5
+        assert model.reactions.Biomass_Ecoli_core.objective_coefficient == 1
+        model.optimize()
+        assert model.reactions.Biomass_Ecoli_core.x > 0.5
+        # test changing objective
         biomass = model.reactions.get_by_id("Biomass_Ecoli_core")
         atpm = model.reactions.get_by_id("ATPM")
         model.objective = atpm.id
         assert atpm.objective_coefficient == 1.
         assert biomass.objective_coefficient == 0.
-        assert model.objective == {atpm: 1.}
+        assert su.linear_reaction_coefficients(model) == {atpm: 1.}
         # change it back using object itself
         model.objective = biomass
         assert atpm.objective_coefficient == 0.
@@ -492,19 +813,100 @@ class TestCobraModel:
         assert biomass.objective_coefficient == 1.
         # set both using a dict
         model.objective = {atpm: 0.2, biomass: 0.3}
-        assert atpm.objective_coefficient == 0.2
-        assert biomass.objective_coefficient == 0.3
+        assert abs(atpm.objective_coefficient - 0.2) < 10 ** -9
+        assert abs(biomass.objective_coefficient - 0.3) < 10 ** -9
         # test setting by index
         model.objective = model.reactions.index(atpm)
-        assert model.objective == {atpm: 1.}
+        assert su.linear_reaction_coefficients(model) == {atpm: 1.}
         # test by setting list of indexes
-        model.objective = map(model.reactions.index, [atpm, biomass])
-        assert model.objective == {atpm: 1., biomass: 1.}
+        model.objective = [model.reactions.index(reaction) for
+                           reaction in [atpm, biomass]]
+        assert su.linear_reaction_coefficients(model) == {atpm: 1.,
+                                                          biomass: 1.}
+
+    def test_problem_properties(self, model):
+        new_variable = model.problem.Variable("test_variable")
+        new_constraint = model.problem.Constraint(S.Zero,
+                                                  name="test_constraint")
+        model.add_cons_vars([new_variable, new_constraint])
+        assert "test_variable" in model.variables.keys()
+        assert "test_constraint" in model.constraints.keys()
+        model.remove_cons_vars([new_constraint, new_variable])
+        assert "test_variable" not in model.variables.keys()
+        assert "test_constraint" not in model.variables.keys()
+
+    def test_solution_data_frame(self, model):
+        solution = model.optimize().to_frame()
+        assert isinstance(solution, pd.DataFrame)
+        assert 'fluxes' in solution
+        assert 'reduced_costs' in solution
+
+    def test_model_medium(self, model):
+        # Add a dummy 'malformed' import reaction
+        bad_import = Reaction('bad_import')
+        bad_import.add_metabolites({model.metabolites.pyr_c: 1})
+        bad_import.bounds = (0, 42)
+        model.add_reaction(bad_import)
+
+        # Test basic setting and getting methods
+        medium = model.medium
+        model.medium = medium
+        assert model.medium == medium
+
+        # Test context management
+        with model:
+            # Ensure the bounds are correct beforehand
+            assert model.reactions.EX_glc__D_e.lower_bound == -10
+            assert model.reactions.bad_import.upper_bound == 42
+            assert model.reactions.EX_co2_e.lower_bound == -1000
+
+            # Make changes to the media
+            new_medium = model.medium
+            new_medium['EX_glc__D_e'] = 20
+            new_medium['bad_import'] = 24
+            del new_medium['EX_co2_e']
+
+            # Change the medium, make sure changes work
+            model.medium = new_medium
+            assert model.reactions.EX_glc__D_e.lower_bound == -20
+            assert model.reactions.bad_import.upper_bound == 24
+            assert model.reactions.EX_co2_e.lower_bound == 0
+
+        # Make sure changes revert after the context
+        assert model.reactions.EX_glc__D_e.lower_bound == -10
+        assert model.reactions.bad_import.upper_bound == 42
+        assert model.reactions.EX_co2_e.lower_bound == -1000
+
+        new_medium['bogus_rxn'] = 0
+        with pytest.raises(KeyError):
+            model.medium = new_medium
+
+    def test_context_manager(self, model):
+        bounds0 = model.reactions[0].bounds
+        bounds1 = (1, 2)
+        bounds2 = (3, 4)
+
+        # Trigger a nested model context, ensuring that bounds are
+        # preserved at each level
+        with model:
+            model.reactions[0].bounds = bounds1
+            with model:
+                model.reactions[0].bounds = bounds2
+
+                assert model.reactions[0].bounds == bounds2
+            assert model.reactions[0].bounds == bounds1
+        assert model.reactions[0].bounds == bounds0
+
+    def test_repr_html_(self, model):
+        assert '<table>' in model._repr_html_()
+
 
+class TestStoichiometricMatrix:
+    """Test the simple replacement for ArrayBasedModel"""
 
- at pytest.mark.skipif(scipy is None, reason="scipy required for ArrayBasedModel")
-class TestCobraArrayModel:
+    @pytest.mark.skipif(not scipy, reason='Sparse array methods require scipy')
     def test_array_model(self, model):
+        """ legacy test """
         for matrix_type in ["scipy.dok_matrix", "scipy.lil_matrix"]:
             array_model = model.to_array_based_model(matrix_type=matrix_type)
             assert array_model.S[7, 0] == -1
@@ -551,7 +953,9 @@ class TestCobraArrayModel:
             assert len(array_model.reactions) == array_model.S.shape[1]
             assert array_model.S.shape == (m, n - 1)
 
+    @pytest.mark.skipif(not scipy, reason='Sparse array methods require scipy')
     def test_array_based_model_add(self, model):
+        """ legacy test """
         array_model = model.to_array_based_model()
         m = len(array_model.metabolites)
         n = len(array_model.reactions)
@@ -570,28 +974,32 @@ class TestCobraArrayModel:
             assert test_model.S[7, 0] == -1
             assert test_model.lower_bounds[n] == -3.14
 
-    def test_array_based_select(self, array_model):
-        atpm_select = array_model.reactions[array_model.lower_bounds > 0]
-        assert len(atpm_select) == 1
-        assert atpm_select[0].id == "ATPM"
-        assert len(
-            array_model.reactions[array_model.lower_bounds <= 0]) == len(
-            array_model.reactions) - 1
-        # mismatched dimensions should give an error
-        with pytest.raises(TypeError):
-            array_model.reactions[[True, False]]
-
-    def test_array_based_bounds_setting(self, array_model):
-        model = array_model
-        bounds = [0.0] * len(model.reactions)
-        model.lower_bounds = bounds
-        assert type(model.reactions[0].lower_bound) == float
-        assert abs(model.reactions[0].lower_bound) < 10 ** -5
-        model.upper_bounds[1] = 1234.0
-        assert abs(model.reactions[1].upper_bound - 1234.0) < 10 ** -5
-        model.upper_bounds[9:11] = [100.0, 200.0]
-        assert abs(model.reactions[9].upper_bound - 100.0) < 10 ** -5
-        assert abs(model.reactions[10].upper_bound - 200.0) < 10 ** -5
-        model.upper_bounds[9:11] = 123.0
-        assert abs(model.reactions[9].upper_bound - 123.0) < 10 ** -5
-        assert abs(model.reactions[10].upper_bound - 123.0) < 10 ** -5
+    def test_dense_matrix(self, model):
+        S = create_stoichiometric_matrix(model, array_type='dense', dtype=int)
+        assert S.dtype == int
+        assert numpy.allclose(S.max(), [59])
+
+        S_df = create_stoichiometric_matrix(
+            model, array_type='DataFrame', dtype=int)
+        assert S_df.values.dtype == int
+        assert numpy.all(S_df.columns == [r.id for r in model.reactions])
+        assert numpy.all(S_df.index == [m.id for m in model.metabolites])
+        assert numpy.allclose(S_df.values, S)
+
+        S = create_stoichiometric_matrix(model, array_type='dense',
+                                         dtype=float)
+        solution = model.optimize()
+        mass_balance = S.dot(solution.fluxes)
+        assert numpy.allclose(mass_balance, 0)
+
+    @pytest.mark.skipif(not scipy, reason='Sparse array methods require scipy')
+    def test_sparse_matrix(self, model):
+        sparse_types = ['dok', 'lil']
+
+        solution = model.optimize()
+        for sparse_type in sparse_types:
+            S = create_stoichiometric_matrix(model, array_type=sparse_type)
+            mass_balance = S.dot(solution.fluxes)
+            assert numpy.allclose(mass_balance, 0)
+
+            # Is this really the best way to get a vector of fluxes?
diff --git a/cobra/test/test_solver_model.py b/cobra/test/test_solver_model.py
new file mode 100644
index 0000000..37ca3b3
--- /dev/null
+++ b/cobra/test/test_solver_model.py
@@ -0,0 +1,800 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import copy
+import os
+
+import numpy
+import optlang
+import pytest
+import six
+
+import cobra
+from cobra.core import Metabolite, Model, Reaction, Solution
+from cobra.util.solver import SolverNotFound, set_objective, solvers
+
+try:
+    import scipy
+except ImportError:
+    scipy = None
+
+
+solver_trials = ['glpk',
+                 pytest.mark.skipif('cplex' not in solvers,
+                                    reason='no cplex')]
+
+
+ at pytest.fixture(scope="function", params=solver_trials)
+def solved_model(request, model):
+    model.solver = request.param
+    solution = model.optimize(solution_type=Solution)
+    return solution, model
+
+
+class TestSolution:
+    def test_solution_contains_only_reaction_specific_values(self,
+                                                             solved_model):
+        solution, model = solved_model
+        reaction_ids = set([reaction.id for reaction in model.reactions])
+        if isinstance(solution, Solution):
+            assert set(solution.fluxes.index) == reaction_ids
+#            assert set(solution.reduced_costs.index) == reaction_ids
+        else:
+            raise TypeError(
+                "solutions of type {0!r} are untested".format(type(solution)))
+
+
+class TestReaction:
+    def test_str(self, model):
+        assert model.reactions[0].__str__().startswith('ACALD')
+
+    def test_add_metabolite(self, solved_model):
+        solution, model = solved_model
+        pgi_reaction = model.reactions.PGI
+        test_met = model.metabolites[0]
+        pgi_reaction.add_metabolites({test_met: 42}, combine=False)
+        assert pgi_reaction.metabolites[test_met] == 42
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.forward_variable] == 42
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.reverse_variable] == -42
+
+        pgi_reaction.add_metabolites({test_met: -10}, combine=True)
+        assert pgi_reaction.metabolites[test_met] == 32
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.forward_variable] == 32
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.reverse_variable] == -32
+
+        pgi_reaction.add_metabolites({test_met: 0}, combine=False)
+        with pytest.raises(KeyError):
+            pgi_reaction.metabolites[test_met]
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.forward_variable] == 0
+        assert model.constraints[
+                   test_met.id].expression.as_coefficients_dict()[
+                   pgi_reaction.reverse_variable] == 0
+
+    def test_removal_from_model_retains_bounds(self, model):
+        model_cp = model.copy()
+        reaction = model_cp.reactions.ACALD
+        assert reaction.model == model_cp
+        assert reaction.lower_bound == -1000.0
+        assert reaction.upper_bound == 1000.0
+        assert reaction._lower_bound == -1000.0
+        assert reaction._upper_bound == 1000.0
+        model_cp.remove_reactions([reaction])
+        assert reaction.model is None
+        assert reaction.lower_bound == -1000.0
+        assert reaction.upper_bound == 1000.0
+        assert reaction._lower_bound == -1000.0
+        assert reaction._upper_bound == 1000.0
+
+    def test_set_bounds_scenario_1(self, model):
+        acald_reaction = model.reactions.ACALD
+        assert acald_reaction.lower_bound == -1000.
+        assert acald_reaction.upper_bound == 1000.
+        assert acald_reaction.forward_variable.lb == 0.
+        assert acald_reaction.forward_variable.ub == 1000.
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 1000.
+        acald_reaction.upper_bound = acald_reaction.lower_bound - 100
+        assert acald_reaction.lower_bound == -1100.0
+        assert acald_reaction.upper_bound == -1100.0
+        assert acald_reaction.forward_variable.lb == 0
+        assert acald_reaction.forward_variable.ub == 0
+        assert acald_reaction.reverse_variable.lb == 1100.
+        assert acald_reaction.reverse_variable.ub == 1100.
+        acald_reaction.upper_bound = 100
+        assert acald_reaction.lower_bound == -1100.0
+        assert acald_reaction.upper_bound == 100
+        assert acald_reaction.forward_variable.lb == 0
+        assert acald_reaction.forward_variable.ub == 100
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 1100.0
+
+    def test_set_bounds_scenario_3(self, model):
+        reac = model.reactions.ACALD
+        reac.upper_bound = -10
+        reac.lower_bound = -10
+        assert reac.lower_bound == -10
+        assert reac.upper_bound == -10
+        reac.lower_bound = -9
+        assert reac.lower_bound == -9
+        assert reac.upper_bound == -9
+        reac.lower_bound = 2
+        assert reac.lower_bound == 2
+        assert reac.upper_bound == 2
+        reac.upper_bound = -10
+        assert reac.lower_bound == -10
+        assert reac.upper_bound == -10
+        reac.upper_bound = -11
+        assert reac.lower_bound == -11
+        assert reac.upper_bound == -11
+        reac.upper_bound = 2
+        assert reac.lower_bound == -11
+        assert reac.upper_bound == 2
+
+    def test_set_bounds_scenario_4(self, model):
+        reac = model.reactions.ACALD
+        reac.lower_bound = reac.upper_bound = 0
+        reac.lower_bound = 2
+        assert reac.lower_bound == 2
+        assert reac.upper_bound == 2
+        assert reac.forward_variable.lb == 2
+        assert reac.forward_variable.ub == 2
+        reac.knock_out()
+        reac.upper_bound = -2
+        assert reac.lower_bound == -2
+        assert reac.upper_bound == -2
+        assert reac.reverse_variable.lb == 2
+        assert reac.reverse_variable.ub == 2
+
+    def test_set_upper_before_lower_bound_to_0(self, model):
+        model.reactions.GAPD.upper_bound = 0
+        model.reactions.GAPD.lower_bound = 0
+        assert model.reactions.GAPD.lower_bound == 0
+        assert model.reactions.GAPD.upper_bound == 0
+        assert model.reactions.GAPD.forward_variable.lb == 0
+        assert model.reactions.GAPD.forward_variable.ub == 0
+        assert model.reactions.GAPD.reverse_variable.lb == 0
+        assert model.reactions.GAPD.reverse_variable.ub == 0
+
+    def test_set_bounds_scenario_2(self, model):
+        acald_reaction = model.reactions.ACALD
+        assert acald_reaction.lower_bound == -1000.
+        assert acald_reaction.upper_bound == 1000.
+        assert acald_reaction.forward_variable.lb == 0.
+        assert acald_reaction.forward_variable.ub == 1000.
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 1000.
+        acald_reaction.lower_bound = acald_reaction.upper_bound + 100
+        assert acald_reaction.lower_bound == 1100.0
+        assert acald_reaction.upper_bound == 1100.0
+        assert acald_reaction.forward_variable.lb == 1100.0
+        assert acald_reaction.forward_variable.ub == 1100.0
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 0
+        acald_reaction.lower_bound = -100
+        assert acald_reaction.lower_bound == -100.
+        assert acald_reaction.upper_bound == 1100.
+        assert acald_reaction.forward_variable.lb == 0
+        assert acald_reaction.forward_variable.ub == 1100.
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 100
+
+    def test_change_bounds(self, model):
+        reac = model.reactions.ACALD
+        reac.bounds = (2, 2)
+        assert reac.lower_bound == 2
+        assert reac.upper_bound == 2
+        with model:
+            reac.bounds = (5, 5)
+            assert reac.lower_bound == 5
+            assert reac.upper_bound == 5
+        assert reac.lower_bound == 2
+        assert reac.upper_bound == 2
+
+    def test_make_irreversible(self, model):
+        acald_reaction = model.reactions.ACALD
+        assert acald_reaction.lower_bound == -1000.
+        assert acald_reaction.upper_bound == 1000.
+        assert acald_reaction.forward_variable.lb == 0.
+        assert acald_reaction.forward_variable.ub == 1000.
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 1000.
+        acald_reaction.lower_bound = 0
+        assert acald_reaction.lower_bound == 0
+        assert acald_reaction.upper_bound == 1000.
+        assert acald_reaction.forward_variable.lb == 0
+        assert acald_reaction.forward_variable.ub == 1000.0
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 0
+        acald_reaction.lower_bound = -100
+        assert acald_reaction.lower_bound == -100.
+        assert acald_reaction.upper_bound == 1000.
+        assert acald_reaction.forward_variable.lb == 0
+        assert acald_reaction.forward_variable.ub == 1000.
+        assert acald_reaction.reverse_variable.lb == 0
+        assert acald_reaction.reverse_variable.ub == 100
+
+    def test_make_reversible(self, model):
+        pfk_reaction = model.reactions.PFK
+        assert pfk_reaction.lower_bound == 0.
+        assert pfk_reaction.upper_bound == 1000.
+        assert pfk_reaction.forward_variable.lb == 0.
+        assert pfk_reaction.forward_variable.ub == 1000.
+        assert pfk_reaction.reverse_variable.lb == 0
+        assert pfk_reaction.reverse_variable.ub == 0
+        pfk_reaction.lower_bound = -100.
+        assert pfk_reaction.lower_bound == -100.
+        assert pfk_reaction.upper_bound == 1000.
+        assert pfk_reaction.forward_variable.lb == 0
+        assert pfk_reaction.forward_variable.ub == 1000.0
+        assert pfk_reaction.reverse_variable.lb == 0
+        assert pfk_reaction.reverse_variable.ub == 100.
+        pfk_reaction.lower_bound = 0
+        assert pfk_reaction.lower_bound == 0
+        assert pfk_reaction.upper_bound == 1000.
+        assert pfk_reaction.forward_variable.lb == 0
+        assert pfk_reaction.forward_variable.ub == 1000.
+        assert pfk_reaction.reverse_variable.lb == 0
+        assert pfk_reaction.reverse_variable.ub == 0
+
+    def test_make_irreversible_irreversible_to_the_other_side(self, model):
+        pfk_reaction = model.reactions.PFK
+        assert pfk_reaction.lower_bound == 0.
+        assert pfk_reaction.upper_bound == 1000.
+        assert pfk_reaction.forward_variable.lb == 0.
+        assert pfk_reaction.forward_variable.ub == 1000.
+        assert pfk_reaction.reverse_variable.lb == 0
+        assert pfk_reaction.reverse_variable.ub == 0
+        pfk_reaction.upper_bound = -100.
+        assert pfk_reaction.forward_variable.lb == 0
+        assert pfk_reaction.forward_variable.ub == 0
+        assert pfk_reaction.reverse_variable.lb == 100
+        assert pfk_reaction.reverse_variable.ub == 100
+        pfk_reaction.lower_bound = -1000.
+        assert pfk_reaction.lower_bound == -1000.
+        assert pfk_reaction.upper_bound == -100.
+        assert pfk_reaction.forward_variable.lb == 0
+        assert pfk_reaction.forward_variable.ub == 0
+        assert pfk_reaction.reverse_variable.lb == 100
+        assert pfk_reaction.reverse_variable.ub == 1000.
+
+    def test_make_lhs_irreversible_reversible(self, model):
+        rxn = Reaction('test')
+        rxn.add_metabolites(
+            {model.metabolites[0]: -1., model.metabolites[1]: 1.})
+        rxn.lower_bound = -1000.
+        rxn.upper_bound = -100
+        model.add_reaction(rxn)
+        assert rxn.lower_bound == -1000.
+        assert rxn.upper_bound == -100.
+        assert rxn.forward_variable.lb == 0.
+        assert rxn.forward_variable.ub == 0.
+        assert rxn.reverse_variable.lb == 100.
+        assert rxn.reverse_variable.ub == 1000.
+        rxn.upper_bound = 666.
+        assert rxn.lower_bound == -1000.
+        assert rxn.upper_bound == 666.
+        assert rxn.forward_variable.lb == 0.
+        assert rxn.forward_variable.ub == 666
+        assert rxn.reverse_variable.lb == 0.
+        assert rxn.reverse_variable.ub == 1000.
+
+    def test_model_less_reaction(self, model):
+        model.optimize(solution_type=Solution)
+        for reaction in model.reactions:
+            assert isinstance(reaction.flux, float)
+            assert isinstance(reaction.reduced_cost, float)
+        for reaction in model.reactions:
+            model.remove_reactions([reaction])
+            with pytest.raises(RuntimeError):
+                reaction.flux
+            with pytest.raises(RuntimeError):
+                reaction.reduced_cost
+
+    def test_knockout(self, model):
+        original_bounds = dict()
+        for reaction in model.reactions:
+            original_bounds[reaction.id] = (
+                reaction.lower_bound, reaction.upper_bound)
+            reaction.knock_out()
+            assert reaction.lower_bound == 0
+            assert reaction.upper_bound == 0
+        for k, (lb, ub) in six.iteritems(original_bounds):
+            model.reactions.get_by_id(k).lower_bound = lb
+            model.reactions.get_by_id(k).upper_bound = ub
+        for reaction in model.reactions:
+            assert reaction.lower_bound == original_bounds[reaction.id][0]
+            assert reaction.upper_bound == original_bounds[reaction.id][1]
+        with model:
+            for reaction in model.reactions:
+                original_bounds[reaction.id] = (
+                    reaction.lower_bound, reaction.upper_bound)
+                reaction.knock_out()
+                assert reaction.lower_bound == 0
+                assert reaction.upper_bound == 0
+        for reaction in model.reactions:
+            assert reaction.lower_bound == original_bounds[reaction.id][0]
+            assert reaction.upper_bound == original_bounds[reaction.id][1]
+
+    def test_reaction_without_model(self):
+        r = Reaction('blub')
+        assert r.flux_expression is None
+        assert r.forward_variable is None
+        assert r.reverse_variable is None
+
+    def test_weird_left_to_right_reaction_issue(self, tiny_toy_model):
+        d1 = tiny_toy_model.reactions.get_by_id('ex1')
+        assert not d1.reversibility
+        assert d1.lower_bound == -1000
+        assert d1._lower_bound == -1000
+        assert d1.upper_bound == 0
+        assert d1._upper_bound == 0
+        with tiny_toy_model:
+            d1.knock_out()
+            assert d1.lower_bound == 0
+            assert d1._lower_bound == 0
+            assert d1.upper_bound == 0
+            assert d1._upper_bound == 0
+        assert d1.lower_bound == -1000
+        assert d1._lower_bound == -1000
+        assert d1.upper_bound == 0
+        assert d1._upper_bound == 0
+
+    def test_one_left_to_right_reaction_set_positive_ub(self, tiny_toy_model):
+        d1 = tiny_toy_model.reactions.get_by_id('ex1')
+        assert d1.reverse_variable.lb == 0
+        assert d1.reverse_variable.ub == 1000
+        assert d1._lower_bound == -1000
+        assert d1.lower_bound == -1000
+        assert d1._upper_bound == 0
+        assert d1.upper_bound == 0
+        assert d1.forward_variable.lb == 0
+        assert d1.forward_variable.ub == 0
+        d1.upper_bound = .1
+        assert d1.forward_variable.lb == 0
+        assert d1.forward_variable.ub == .1
+        assert d1.reverse_variable.lb == 0
+        assert d1.reverse_variable.ub == 1000
+        assert d1._lower_bound == -1000
+        assert d1.upper_bound == .1
+        assert d1._lower_bound == -1000
+        assert d1.upper_bound == .1
+
+    def test_irrev_reaction_set_negative_lb(self, model):
+        assert not model.reactions.PFK.reversibility
+        assert model.reactions.PFK.lower_bound == 0
+        assert model.reactions.PFK.upper_bound == 1000.0
+        assert model.reactions.PFK.forward_variable.lb == 0
+        assert model.reactions.PFK.forward_variable.ub == 1000.0
+        assert model.reactions.PFK.reverse_variable.lb == 0
+        assert model.reactions.PFK.reverse_variable.ub == 0
+        model.reactions.PFK.lower_bound = -1000
+        assert model.reactions.PFK.lower_bound == -1000
+        assert model.reactions.PFK.upper_bound == 1000.0
+        assert model.reactions.PFK.forward_variable.lb == 0
+        assert model.reactions.PFK.forward_variable.ub == 1000.0
+        assert model.reactions.PFK.reverse_variable.lb == 0
+        assert model.reactions.PFK.reverse_variable.ub == 1000
+
+    def test_twist_irrev_right_to_left_reaction_to_left_to_right(self, model):
+        assert not model.reactions.PFK.reversibility
+        assert model.reactions.PFK.lower_bound == 0
+        assert model.reactions.PFK.upper_bound == 1000.0
+        assert model.reactions.PFK.forward_variable.lb == 0
+        assert model.reactions.PFK.forward_variable.ub == 1000.0
+        assert model.reactions.PFK.reverse_variable.lb == 0
+        assert model.reactions.PFK.reverse_variable.ub == 0
+        model.reactions.PFK.lower_bound = -1000
+        model.reactions.PFK.upper_bound = 0
+        assert model.reactions.PFK.lower_bound == -1000
+        assert model.reactions.PFK.upper_bound == 0
+        assert model.reactions.PFK.forward_variable.lb == 0
+        assert model.reactions.PFK.forward_variable.ub == 0
+        assert model.reactions.PFK.reverse_variable.lb == 0
+        assert model.reactions.PFK.reverse_variable.ub == 1000
+
+    def test_set_lb_higher_than_ub_sets_ub_to_new_lb(self, model):
+        for reaction in model.reactions:
+            assert reaction.lower_bound <= reaction.upper_bound
+            reaction.lower_bound = reaction.upper_bound + 100
+            assert reaction.lower_bound == reaction.upper_bound
+
+    def test_set_ub_lower_than_lb_sets_lb_to_new_ub(self, model):
+        for reaction in model.reactions:
+            assert reaction.lower_bound <= reaction.upper_bound
+            reaction.upper_bound = reaction.lower_bound - 100
+            assert reaction.lower_bound == reaction.upper_bound
+
    def test_add_metabolites_combine_true(self, model):
        """With ``combine=True`` coefficients are summed onto existing ones.

        For every reaction: add a fresh metabolite and verify the solver
        constraint row gains the matching stoichiometry (positive on the
        forward variable, negated on the reverse), then add 10 to an
        already-present metabolite and verify the row carries the summed
        coefficient.
        """
        test_metabolite = Metabolite('test')
        for reaction in model.reactions:
            reaction.add_metabolites({test_metabolite: -66}, combine=True)
            assert reaction.metabolites[test_metabolite] == -66
            # The 'test' constraint must mirror the stoichiometry:
            # -66 on forward flux, +66 on reverse flux.
            assert model.constraints['test'].expression.has(
                -66. * reaction.forward_variable)
            assert model.constraints['test'].expression.has(
                66. * reaction.reverse_variable)
            already_included_metabolite = \
                list(reaction.metabolites.keys())[0]
            previous_coefficient = reaction.get_coefficient(
                already_included_metabolite.id)
            reaction.add_metabolites({already_included_metabolite: 10},
                                     combine=True)
            # combine=True sums: old coefficient plus the added 10.
            new_coefficient = previous_coefficient + 10
            assert reaction.metabolites[
                       already_included_metabolite] == new_coefficient
            assert model.constraints[
                already_included_metabolite.id].expression.has(
                new_coefficient * reaction.forward_variable)
            assert model.constraints[
                already_included_metabolite.id].expression.has(
                -1 * new_coefficient * reaction.reverse_variable)
+
    @pytest.mark.xfail(reason='non-deterministic test')
    def test_add_metabolites_combine_false(self, model):
        """With ``combine=False`` new coefficients replace existing ones.

        Marked xfail upstream as 'non-deterministic'; presumably the
        in-loop mutation of metabolites shared between reactions makes
        the outcome depend on iteration order — TODO confirm.
        """
        test_metabolite = Metabolite('test')
        for reaction in model.reactions:
            reaction.add_metabolites({test_metabolite: -66}, combine=False)
            assert reaction.metabolites[test_metabolite] == -66
            assert model.constraints['test'].expression.has(
                -66. * reaction.forward_variable)
            assert model.constraints['test'].expression.has(
                66. * reaction.reverse_variable)
            already_included_metabolite = \
                list(reaction.metabolites.keys())[0]
            reaction.add_metabolites({already_included_metabolite: 10},
                                     combine=False)
            # combine=False overwrites: the coefficient is now exactly 10.
            assert reaction.metabolites[already_included_metabolite] == 10
            assert model.constraints[
                already_included_metabolite.id].expression.has(
                10 * reaction.forward_variable)
            assert model.constraints[
                already_included_metabolite.id].expression.has(
                -10 * reaction.reverse_variable)
+
+    # def test_pop(self, model):
+    #     pgi = model.reactions.PGI
+    #     g6p = model.metabolites.get_by_id("g6p_c")
+    #     f6p = model.metabolites.get_by_id("f6p_c")
+    #     g6p_expr = model.solver.constraints["g6p_c"].expression
+    #     g6p_coef = pgi.pop("g6p_c")
+    #     assert g6p not in pgi.metabolites
+    #     actual = model.solver.constraints[
+    #         "g6p_c"].expression.as_coefficients_dict()
+    #     expected = (g6p_expr - g6p_coef * pgi.flux_expression
+    #                 ).as_coefficients_dict()
+    #     assert actual == expected
+    #     assert pgi.metabolites[f6p] == 1
+    #
+    #     f6p_expr = model.solver.constraints["f6p_c"].expression
+    #     f6p_coef = pgi.pop(f6p)
+    #     assert f6p not in pgi.metabolites
+    #     assert model.solver.constraints[
+    #                "f6p_c"].expression.as_coefficients_dict() == (
+    #                f6p_expr - f6p_coef * pgi.flux_expression
+    #            ).as_coefficients_dict()
+
+    def test_remove_from_model(self, model):
+        pgi = model.reactions.PGI
+        g6p = model.metabolites.g6p_c
+
+        with model:
+            pgi.remove_from_model()
+            assert pgi.model is None
+            assert "PGI" not in model.reactions
+            assert pgi.id not in model.variables
+            assert pgi.reverse_id not in model.variables
+            assert pgi not in g6p.reactions
+
+        assert "PGI" in model.reactions
+        assert pgi.id in model.variables
+        assert pgi.reverse_id in model.variables
+        assert pgi.forward_variable.problem is model.solver
+        assert pgi in g6p.reactions
+        assert g6p in pgi.metabolites
+
+    def test_change_id_is_reflected_in_solver(self, model):
+        for i, reaction in enumerate(model.reactions):
+            old_reaction_id = reaction.id
+            assert model.variables[
+                       old_reaction_id].name == old_reaction_id
+            assert old_reaction_id in model.variables
+            new_reaction_id = reaction.id + '_' + str(i)
+            reaction.id = new_reaction_id
+            assert reaction.id == new_reaction_id
+            assert not (old_reaction_id in model.variables)
+            assert reaction.id in model.variables
+            assert reaction.reverse_id in model.variables
+            name = model.variables[reaction.id].name
+            assert name == reaction.id
+
+
class TestSolverBasedModel:
    """Model behavior that touches the optlang solver layer: objectives,
    reaction addition/removal, solver switching and copying."""

    def test_objective_coefficient_reflects_changed_objective(self, model):
        """Replacing the objective zeroes the old reaction's coefficient."""
        biomass_r = model.reactions.get_by_id('Biomass_Ecoli_core')
        assert biomass_r.objective_coefficient == 1
        model.objective = "PGI"
        assert biomass_r.objective_coefficient == 0
        assert model.reactions.PGI.objective_coefficient == 1

    def test_change_objective_through_objective_coefficient(self, model):
        """Setting objective_coefficient adds to the existing objective."""
        biomass_r = model.reactions.get_by_id('Biomass_Ecoli_core')
        pgi = model.reactions.PGI
        pgi.objective_coefficient = 2
        coef_dict = model.objective.expression.as_coefficients_dict()
        # Check that objective has been updated
        assert coef_dict[pgi.forward_variable] == 2
        assert coef_dict[pgi.reverse_variable] == -2
        # Check that original objective is still in there
        assert coef_dict[biomass_r.forward_variable] == 1
        assert coef_dict[biomass_r.reverse_variable] == -1

    def test_transfer_objective(self, model):
        """An objective can be assigned to another model that holds the
        same reactions, and optimizing there gives the same optimum."""
        new_mod = Model("new model")
        new_mod.add_reactions(model.reactions)
        new_mod.objective = model.objective
        # Compare by string: the expressions live in different solvers.
        assert (set(str(x) for x in model.objective.expression.args) == set(
            str(x) for x in new_mod.objective.expression.args))
        new_mod.slim_optimize()
        assert abs(new_mod.objective.value - 0.874) < 0.001

    def test_model_from_other_model(self, model):
        """Model(id_or_model=...) yields a model with the same reactions."""
        model = Model(id_or_model=model)
        for reaction in model.reactions:
            assert reaction == model.reactions.get_by_id(reaction.id)

    def test_add_reactions(self, model):
        """add_reactions wires new reactions into the solver; coefficients
        set afterwards show up in the objective expression."""
        r1 = Reaction('r1')
        r1.add_metabolites({Metabolite('A'): -1, Metabolite('B'): 1})
        r1.lower_bound, r1.upper_bound = -999999., 999999.
        r2 = Reaction('r2')
        r2.add_metabolites(
            {Metabolite('A'): -1, Metabolite('C'): 1, Metabolite('D'): 1})
        r2.lower_bound, r2.upper_bound = 0., 999999.
        model.add_reactions([r1, r2])
        r2.objective_coefficient = 3.
        assert r2.objective_coefficient == 3.
        assert model.reactions[-2] == r1
        assert model.reactions[-1] == r2
        assert isinstance(model.reactions[-2].reverse_variable,
                          model.problem.Variable)
        coefficients_dict = model.objective.expression. \
            as_coefficients_dict()
        biomass_r = model.reactions.get_by_id('Biomass_Ecoli_core')
        assert coefficients_dict[biomass_r.forward_variable] == 1.
        assert coefficients_dict[biomass_r.reverse_variable] == -1.
        assert coefficients_dict[
                   model.reactions.r2.forward_variable] == 3.
        assert coefficients_dict[
                   model.reactions.r2.reverse_variable] == -3.

    def test_add_cobra_reaction(self, model):
        """A plain cobra.Reaction is accepted by add_reaction."""
        r = cobra.Reaction(id="c1")
        model.add_reaction(r)
        assert isinstance(model.reactions.c1, Reaction)

    def test_all_objects_point_to_all_other_correct_objects(self, model):
        """Reactions, genes and metabolites cross-reference the same model
        and the same registered objects."""
        for reaction in model.reactions:
            assert reaction.model == model
            for gene in reaction.genes:
                assert gene == model.genes.get_by_id(gene.id)
                assert gene.model == model
                for reaction2 in gene.reactions:
                    assert reaction2.model == model
                    assert reaction2 == model.reactions.get_by_id(
                        reaction2.id)

            for metabolite in reaction.metabolites:
                assert metabolite.model == model
                assert metabolite == model.metabolites.get_by_id(
                    metabolite.id)
                for reaction2 in metabolite.reactions:
                    assert reaction2.model == model
                    assert reaction2 == model.reactions.get_by_id(
                        reaction2.id)

    def test_objects_point_to_correct_other_after_copy(self, model):
        """Same cross-reference checks as the previous test.

        NOTE(review): despite the name, no copy of the model is made
        here — the body is identical to
        test_all_objects_point_to_all_other_correct_objects; looks like
        a ``model.copy()`` step is missing — confirm intent upstream.
        """
        for reaction in model.reactions:
            assert reaction.model == model
            for gene in reaction.genes:
                assert gene == model.genes.get_by_id(gene.id)
                assert gene.model == model
                for reaction2 in gene.reactions:
                    assert reaction2.model == model
                    assert reaction2 == model.reactions.get_by_id(
                        reaction2.id)

            for metabolite in reaction.metabolites:
                assert metabolite.model == model
                assert metabolite == model.metabolites.get_by_id(
                    metabolite.id)
                for reaction2 in metabolite.reactions:
                    assert reaction2.model == model
                    assert reaction2 == model.reactions.get_by_id(
                        reaction2.id)

    def test_remove_reactions(self, model):
        """Removed reactions lose their model and solver variables and can
        be re-added afterwards."""
        reactions_to_remove = model.reactions[10:30]
        assert all([reaction.model is model for reaction in
                    reactions_to_remove])
        assert all(
            [model.reactions.get_by_id(reaction.id) == reaction for
             reaction in reactions_to_remove])

        model.remove_reactions(reactions_to_remove)
        assert all(
            [reaction.model is None for reaction in reactions_to_remove])
        for reaction in reactions_to_remove:
            assert reaction.id not in list(
                model.variables.keys())

        model.add_reactions(reactions_to_remove)
        for reaction in reactions_to_remove:
            assert reaction in model.reactions

    def test_objective(self, model):
        """The default objective maximizes biomass with coefficient one.

        The reverse variable carries a hash suffix ('..._2cdba') that is
        stable for this model's biomass reaction id.
        """
        obj = model.objective
        assert {var.name: coef for var, coef in
                obj.expression.as_coefficients_dict().items()} == {
                   'Biomass_Ecoli_core_reverse_2cdba': -1,
                   'Biomass_Ecoli_core': 1}
        assert obj.direction == "max"

    def test_change_objective(self, model):
        """Objectives set via expression, id, reaction, coefficient or
        set_objective inside a context are all rolled back on exit."""
        expression = 1.0 * model.variables['ENO'] + \
                     1.0 * model.variables['PFK']
        model.objective = model.problem.Objective(
            expression)
        assert model.objective.expression == expression
        model.objective = "ENO"
        eno_obj = model.problem.Objective(
            model.reactions.ENO.flux_expression, direction="max")
        pfk_obj = model.problem.Objective(
            model.reactions.PFK.flux_expression, direction="max")
        assert model.objective == eno_obj

        with model:
            model.objective = "PFK"
            assert model.objective == pfk_obj
        assert model.objective == eno_obj
        expression = model.objective.expression
        atpm = model.reactions.get_by_id("ATPM")
        biomass = model.reactions.get_by_id("Biomass_Ecoli_core")
        with model:
            model.objective = atpm
        assert model.objective.expression == expression
        with model:
            atpm.objective_coefficient = 1
            biomass.objective_coefficient = 2
        assert model.objective.expression == expression

        with model:
            set_objective(model, model.problem.Objective(
                atpm.flux_expression))
            assert model.objective.expression == atpm.flux_expression
        assert model.objective.expression == expression

        expression = model.objective.expression
        with model:
            with model:  # Test to make sure nested contexts are OK
                set_objective(model, atpm.flux_expression,
                              additive=True)
                assert (model.objective.expression ==
                        expression + atpm.flux_expression)
        assert model.objective.expression == expression

    def test_set_reaction_objective(self, model):
        """Assigning a reaction as objective uses its flux expression."""
        model.objective = model.reactions.ACALD
        assert str(model.objective.expression) == str(
            1.0 * model.reactions.ACALD.forward_variable -
            1.0 * model.reactions.ACALD.reverse_variable)

    def test_set_reaction_objective_str(self, model):
        """Assigning a reaction id as objective behaves like the reaction."""
        model.objective = model.reactions.ACALD.id
        assert str(model.objective.expression) == str(
            1.0 * model.reactions.ACALD.forward_variable -
            1.0 * model.reactions.ACALD.reverse_variable)

    def test_invalid_objective_raises(self, model):
        """Unknown ids raise ValueError; unsupported types raise TypeError."""
        with pytest.raises(ValueError):
            setattr(model, 'objective', 'This is not a valid objective!')
        with pytest.raises(TypeError):
            setattr(model, 'objective', 3.)

    @pytest.mark.skipif("cplex" not in solvers, reason="need cplex")
    def test_solver_change(self, model):
        """Switching the solver by name rebuilds the problem but preserves
        the optimal flux distribution."""
        solver_id = id(model.solver)
        problem_id = id(model.solver.problem)
        solution = model.optimize(solution_type=Solution).fluxes
        model.solver = "cplex"
        assert id(model.solver) != solver_id
        assert id(model.solver.problem) != problem_id
        new_solution = model.optimize(solution_type=Solution).fluxes
        for key in list(solution.keys()):
            assert round(abs(new_solution[key] - solution[key]),
                         7) == 0

    @pytest.mark.skipif("cplex" not in solvers, reason="need cplex")
    def test_solver_change_with_optlang_interface(self, model):
        """Switching by optlang interface module behaves like switching
        by name."""
        solver_id = id(model.solver)
        problem_id = id(model.solver.problem)
        solution = model.optimize(solution_type=Solution).fluxes
        model.solver = optlang.cplex_interface
        assert id(model.solver) != solver_id
        assert id(model.solver.problem) != problem_id
        new_solution = model.optimize(solution_type=Solution).fluxes
        for key in list(solution.keys()):
            assert round(abs(new_solution[key] - solution[key]),
                         7) == 0

    def test_no_change_for_same_solver(self, model):
        """Re-assigning the current solver is a no-op (same objects)."""
        solver_id = id(model.solver)
        problem_id = id(model.solver.problem)
        model.solver = "glpk"
        assert id(model.solver) == solver_id
        assert id(model.solver.problem) == problem_id

    def test_invalid_solver_change_raises(self, model):
        """Anything that is not a known solver raises SolverNotFound."""
        with pytest.raises(SolverNotFound):
            setattr(model, 'solver', [1, 2, 3])
        with pytest.raises(SolverNotFound):
            setattr(model, 'solver',
                    'ThisIsDefinitelyNotAvalidSolver')
        with pytest.raises(SolverNotFound):
            setattr(model, 'solver', os)

    @pytest.mark.skipif('cplex' not in solvers, reason='no cplex')
    def test_change_solver_to_cplex_and_check_copy_works(self, model):
        """Copies made before and after a solver switch still optimize to
        the same value."""
        assert round(abs(model.optimize().f - 0.8739215069684306), 7) == 0
        model_copy = model.copy()
        assert round(abs(model_copy.optimize().f - 0.8739215069684306),
                     7) == 0
        # Second, change existing glpk based model to cplex
        model.solver = 'cplex'
        assert round(abs(model.optimize().f - 0.8739215069684306),
                     7) == 0
        model_copy = copy.copy(model)
        assert round(abs(model_copy.optimize().f - 0.8739215069684306),
                     7) == 0

    def test_copy_preserves_existing_solution(self, solved_model):
        """Copying a solved model keeps the variable primal values."""
        solution, model = solved_model
        model_cp = copy.copy(model)
        primals_original = [variable.primal for variable in
                            model.variables]
        primals_copy = [variable.primal for variable in
                        model_cp.variables]
        abs_diff = abs(
            numpy.array(primals_copy) - numpy.array(primals_original))
        assert not any(abs_diff > 1e-6)
+
+
class TestMetabolite:
    """Tests for metabolite id handling and removal from a model."""

    def test_set_id(self, solved_model):
        """Ids must be strings, unique within the model, and a rename is
        reflected in the model's metabolite registry."""
        solution, model = solved_model
        metabolite = Metabolite("test")
        with pytest.raises(TypeError):
            metabolite.id = 1
        model.add_metabolites([metabolite])
        # 'g6p_c' is already taken by another metabolite in the model.
        with pytest.raises(ValueError):
            metabolite.id = 'g6p_c'
        metabolite.id = "test2"
        assert "test2" in model.metabolites
        assert "test" not in model.metabolites

    def test_remove_from_model(self, solved_model):
        """Removing a metabolite drops it and its mass-balance constraint."""
        solution, model = solved_model
        metabolite = model.metabolites.get_by_id("g6p_c")
        metabolite.remove_from_model()
        assert metabolite.id not in model.metabolites
        assert metabolite.id not in model.constraints
diff --git a/cobra/test/test_solver_utils.py b/cobra/test/test_solver_utils.py
new file mode 100644
index 0000000..52710a8
--- /dev/null
+++ b/cobra/test/test_solver_utils.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import optlang
+import pytest
+
+import cobra.util.solver as su
+from cobra.test.conftest import model
+
# Optlang backends considered stable enough to parametrize the tests with.
stable_optlang = ["glpk", "cplex", "gurobi"]
# "optlang-" prefixed ids, restricted to solvers actually installed here.
optlang_solvers = ["optlang-" + s for s in stable_optlang if s in su.solvers]
+
+
class TestHelpers:
    """Sanity checks for the helper functions in cobra.util.solver."""

    def test_solver_list(self):
        """At least one solver is installed and glpk is among them."""
        assert len(su.solvers) >= 1
        assert "glpk" in su.solvers

    def test_interface_str(self):
        """interface_to_str normalizes module paths and optlang- prefixes
        and passes unknown strings through unchanged."""
        cases = {
            "nonsense": "nonsense",
            "optlang.glpk_interface": "glpk",
            "optlang-cplex": "cplex",
        }
        for raw, expected in cases.items():
            assert su.interface_to_str(raw) == expected

    def test_solver_name(self):
        """The default solver resolves to glpk."""
        assert su.get_solver_name() == "glpk"

    def test_choose_solver(self, model):
        """choose_solver picks glpk by default, flags legacy solvers, and
        honors the qp requirement when QP-capable solvers exist."""
        legacy, interface = su.choose_solver(model)
        assert not legacy
        assert su.interface_to_str(interface) == "glpk"
        legacy, interface = su.choose_solver(model, "optlang-glpk")
        assert not legacy
        assert su.interface_to_str(interface) == "glpk"
        # 'cglpk' is a legacy (non-optlang) solver name.
        assert su.choose_solver(model, "cglpk")[0]

        if any(name in su.solvers for name in su.qp_solvers):
            legacy, qp_choice = su.choose_solver(model, qp=True)
            assert not legacy
            assert su.interface_to_str(qp_choice) in su.qp_solvers
        else:
            with pytest.raises(su.SolverNotFound):
                su.choose_solver(model, qp=True)
+
+
class TestObjectiveHelpers:
    """Tests for the objective-related utilities."""

    def test_linear_reaction_coefficients(self, model):
        """The default objective is the biomass reaction with weight one."""
        assert su.linear_reaction_coefficients(model) == {
            model.reactions.Biomass_Ecoli_core: 1}

    @pytest.mark.parametrize("solver", optlang_solvers)
    def test_fail_non_linear_reaction_coefficients(self, model, solver):
        """A quadratic objective yields no linear coefficients and blocks
        setting objective_coefficient; interfaces without quadratic
        support may reject the objective with ValueError up front."""
        model.solver = solver
        try:
            model.objective = model.problem.Objective(
                model.reactions.ATPM.flux_expression ** 2
            )
        except ValueError:
            # This interface cannot represent quadratic objectives.
            pass
        else:
            assert su.linear_reaction_coefficients(model) == {}
            with pytest.raises(ValueError):
                model.reactions.ACALD.objective_coefficient = 1
+
+
class TestSolverMods:
    """Tests for adding/removing solver constructs and objective fixing."""

    def test_add_remove(self, model):
        """Variables and constraints can be added to and removed from the
        underlying solver problem."""
        v = model.variables
        # NOTE(review): lb == ub == -10 pins the variable to one value;
        # presumably intentional, but confirm ub was not meant to be +10.
        new_var = model.problem.Variable("test_var", lb=-10, ub=-10)
        new_constraint = model.problem.Constraint(
            v.PGK - new_var, name="test_constraint", lb=0)

        su.add_cons_vars_to_problem(model, [new_var, new_constraint])
        assert "test_var" in model.variables.keys()
        assert "test_constraint" in model.constraints.keys()

        su.remove_cons_vars_from_problem(model, [new_var, new_constraint])
        assert "test_var" not in model.variables.keys()
        assert "test_constraint" not in model.constraints.keys()

    def test_add_remove_in_context(self, model):
        """Solver additions and removals made inside a model context are
        reverted when the context exits."""
        v = model.variables
        new_var = model.problem.Variable("test_var", lb=-10, ub=-10)

        with model:
            su.add_cons_vars_to_problem(model, [new_var])
            su.remove_cons_vars_from_problem(model, [v.PGM])
            assert "test_var" in model.variables.keys()
            assert "PGM" not in model.variables.keys()

        assert "test_var" not in model.variables.keys()
        assert "PGM" in model.variables.keys()

    def test_absolute_expression(self, model):
        """add_absolute_expression creates one variable plus two bounding
        constraints, all removed again when the context exits."""
        v = model.variables
        with model:
            parts = su.add_absolute_expression(
                model, 2 * v.PGM, name="test", ub=100)
            assert len(parts) == 3
            assert "test" in model.variables.keys()
            assert "abs_pos_test" in model.constraints.keys()
            assert "abs_neg_test" in model.constraints.keys()
        assert "test" not in model.variables.keys()
        assert "abs_pos_test" not in model.constraints.keys()
        assert "abs_neg_test" not in model.constraints.keys()

    @pytest.mark.parametrize("solver", optlang_solvers)
    def test_fix_objective_as_constraint(self, solver, model):
        """fix_objective_as_constraint adds a constraint mirroring the
        objective; added inside a context it is removed on exit, added
        outside it persists."""
        model.solver = solver
        with model as m:
            su.fix_objective_as_constraint(model, 1.0)
            # Remember the constraint *name* (the original stored the
            # Constraint object itself in a variable called
            # ``constraint_name``): after the context rolls the model
            # back, membership is checked by name.
            constraint_name = m.constraints[-1].name
            assert abs(m.constraints[-1].expression -
                       m.objective.expression) < 1e-6
        assert constraint_name not in m.constraints
        su.fix_objective_as_constraint(model)
        constraint_name = model.constraints[-1].name
        assert abs(model.constraints[-1].expression -
                   model.objective.expression) < 1e-6
        assert constraint_name in model.constraints

    @pytest.mark.parametrize("solver", optlang_solvers)
    def test_fix_objective_as_constraint_minimize(self, solver, model):
        """For a minimization objective the fixed constraint is bounded
        above by the optimal value while its lower bound stays open."""
        # Use the parametrized solver (the original never assigned it,
        # so every parametrization silently ran on the default solver).
        model.solver = solver
        model.reactions.Biomass_Ecoli_core.bounds = (0.1, 0.1)
        minimize_glucose = model.problem.Objective(
            model.reactions.EX_glc__D_e.flux_expression,
            direction='min')
        su.set_objective(model, minimize_glucose)
        su.fix_objective_as_constraint(model)
        fx_name = 'fixed_objective_{}'.format(model.objective.name)
        constr = model.constraints
        assert (constr[fx_name].lb, constr[fx_name].ub) == (
            None, model.solver.objective.value)
diff --git a/cobra/test/test_solvers.py b/cobra/test/test_solvers.py
index 4b32eab..d0e2d24 100644
--- a/cobra/test/test_solvers.py
+++ b/cobra/test/test_solvers.py
@@ -1,6 +1,11 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
 import pytest
-from cobra.core import Model, Reaction, Metabolite
+
 from cobra import solvers
+from cobra.core import Metabolite, Model, Reaction
+
 from .conftest import model
 
 try:
@@ -110,13 +115,13 @@ class TestCobraSolver:
         constraint._bound = 2.5
         x = Reaction("x")
         x.lower_bound = 0.
-        x.objective_coefficient = 1.
         x.add_metabolites({constraint: 2.5})
         y = Reaction("y")
         y.lower_bound = 0.
-        y.objective_coefficient = 1.
         y.add_metabolites({constraint: 1.})
         cobra_model.add_reactions([x, y])
+        x.objective_coefficient = 1.
+        y.objective_coefficient = 1.
         float_sol = solver.solve(cobra_model)
         # add an integer constraint
         y.variable_kind = "integer"
@@ -156,9 +161,9 @@ class TestCobraSolver:
         x.add_metabolites({c: 1})
         z = Reaction("z")
         z.add_metabolites({c: 1})
-        z.objective_coefficient = 1
         m = Model("test_model")
         m.add_reactions([x, y, z])
+        z.objective_coefficient = 1
         # change an existing coefficient
         lp = solver.create_problem(m)
         solver.solve_problem(lp)
@@ -218,15 +223,15 @@ class TestCobraSolver:
         c = Metabolite("c")
         c._bound = 2
         x = Reaction("x")
-        x.objective_coefficient = -0.5
         x.lower_bound = 0.
         y = Reaction("y")
-        y.objective_coefficient = -0.5
         y.lower_bound = 0.
         x.add_metabolites({c: 1})
         y.add_metabolites({c: 1})
         m = Model()
         m.add_reactions([x, y])
+        x.objective_coefficient = -0.5
+        y.objective_coefficient = -0.5
         lp = solver.create_problem(m)
         quadratic_obj = scipy.sparse.eye(2) * 2
         solver.set_quadratic_objective(lp, quadratic_obj)
diff --git a/cobra/test/test_dictlist.py b/cobra/test/test_util.py
similarity index 73%
rename from cobra/test/test_dictlist.py
rename to cobra/test/test_util.py
index e7e072b..d04fc65 100644
--- a/cobra/test/test_dictlist.py
+++ b/cobra/test/test_util.py
@@ -1,8 +1,18 @@
-import pytest
-from copy import deepcopy, copy
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from builtins import zip
+
 import re
+from copy import copy, deepcopy
+from pickle import HIGHEST_PROTOCOL, dumps, loads
+
+import pytest
+from six.moves import range
+
 from cobra import DictList, Object
-from pickle import loads, dumps, HIGHEST_PROTOCOL
+from cobra.util.version_info import (
+    get_sys_info, get_pkg_info, show_versions, SYS_ORDER, PKG_ORDER)
 
 
 @pytest.fixture(scope="function")
@@ -43,6 +53,16 @@ class TestDictList:
         assert "o3" not in a
         assert "o3" in b
 
+    def test_get_by_any(self, dict_list):
+        obj, test_list = dict_list
+        assert test_list.get_by_any(0) == [obj]
+        assert test_list.get_by_any('test1') == [obj]
+        with pytest.raises(KeyError):
+            test_list.get_by_any('not-in-list')
+        with pytest.raises(TypeError):
+            test_list.get_by_any(1.1)
+        assert test_list.get_by_any(obj) == [obj]
+
     def test_append(self, dict_list):
         obj, test_list = dict_list
         obj2 = Object("test2")
@@ -102,6 +122,25 @@ class TestDictList:
         assert len(test_list) == 1
         assert len(sum) == 9
 
+    def test_sub(self, dict_list):
+        obj, test_list = dict_list
+        obj_list = [Object("test%d" % i) for i in range(2, 10)]
+        sum = test_list + obj_list
+        sub = sum - test_list
+        assert test_list[0].id == "test1"
+        assert sub[0].id == "test2"
+        assert len(sub) == 8
+        assert sum - obj_list == test_list
+
+    def test_isub(self, dict_list):
+        obj, test_list = dict_list
+        obj_list = [Object("test%d" % i) for i in range(2, 10)]
+        sum = test_list + obj_list
+        sum -= obj_list[2:4]
+        assert len(sum) == 7
+        with pytest.raises(ValueError):
+            sum -= [Object('bogus')]
+
     def test_init_copy(self, dict_list):
         obj, test_list = dict_list
         test_list.append(Object("test2"))
@@ -177,22 +216,25 @@ class TestDictList:
         test_list.append(obj2)
         result = test_list.query("test1")  # matches only test1
         assert len(result) == 1
-        result = test_list.query(u"test1")  # matches with unicode
+        result = test_list.query(u"test1", "id")  # matches with unicode
         assert len(result) == 1
         assert result[0] == obj
         result = test_list.query("foo", "name")  # matches only test2
         assert len(result) == 1
         assert result[0] == obj2
-        result = test_list.query("test")  # matches test1 and test2
+        result = test_list.query("test", "id")  # matches test1 and test2
         assert len(result) == 2
         # test with a regular expression
-        result = test_list.query(re.compile("test[0-9]"))
+        result = test_list.query(re.compile("test[0-9]"), "id")
         assert len(result) == 2
-        result = test_list.query(re.compile("test[29]"))
+        result = test_list.query(re.compile("test[29]"), "id")
         assert len(result) == 1
         # test query of name
         result = test_list.query(re.compile("foobar."), "name")
         assert len(result) == 1
+        # test query with lambda function
+        result = test_list.query(lambda x: x.id == 'test1')
+        assert len(result) == 1
 
     def test_removal(self):
         obj_list = DictList(Object("test%d" % (i)) for i in range(2, 10))
@@ -255,3 +297,58 @@ class TestDictList:
         # should only add 1 element
         assert len(test_list) == 2
         assert test_list.index("test2") == 1
+
+
+class TestVersionInfo:
+    SKIP_OPTIONAL = frozenset([
+        "cobra", "python-libsbml", "lxml", "matplotlib", "palettable", "scipy",
+        "pymatbridge"])
+
+    @pytest.fixture(scope="module")
+    def sys_info(self):
+        return get_sys_info()
+
+    @pytest.fixture(scope="module")
+    def pkg_info(self):
+        return get_pkg_info()
+
+    @pytest.mark.parametrize("key", SYS_ORDER)
+    def test_sys_info_key(self, key, sys_info):
+        assert key in sys_info
+
+    @pytest.mark.parametrize("key", SYS_ORDER)
+    def test_sys_info_value(self, key, sys_info):
+        assert len(sys_info[key]) > 0
+
+    @pytest.mark.parametrize("key", PKG_ORDER)
+    def test_pkg_info_key(self, key, pkg_info):
+        if key in self.SKIP_OPTIONAL:
+            pytest.skip()
+        assert key in pkg_info
+
+    @pytest.mark.parametrize("key", PKG_ORDER)
+    def test_pkg_info_value(self, key, pkg_info):
+        if key in self.SKIP_OPTIONAL:
+            pytest.skip()
+        assert len(pkg_info[key]) > 0
+
+    def test_show_versions(self, sys_info, pkg_info, capsys):
+        show_versions()
+        out, err = capsys.readouterr()
+        lines = out.split("\n")
+        i = 3
+        for key in SYS_ORDER:
+            line = lines[i]
+            assert line.startswith(key)
+            assert line.endswith(sys_info[key])
+            i += 1
+        i += 3
+        for key in PKG_ORDER:
+            line = lines[i]
+            if key in self.SKIP_OPTIONAL:
+                if line.startswith(key):
+                    i += 1
+                continue
+            assert line.startswith(key)
+            assert line.endswith(pkg_info[key])
+            i += 1
diff --git a/cobra/topology/__init__.py b/cobra/topology/__init__.py
index 1bb2a5e..96ab383 100644
--- a/cobra/topology/__init__.py
+++ b/cobra/topology/__init__.py
@@ -1,9 +1,5 @@
-from os import name as __name
-from sys import modules as __modules
-from warnings import warn
-if __name == 'java':
-    warn("%s is not yet supported on jython" % __modules[__name__])
+# -*- coding: utf-8 -*-
 
-else:
-    from .reporter_metabolites import *
-del __name, __modules
+from __future__ import absolute_import
+
+from cobra.topology.reporter_metabolites import identify_reporter_metabolites
diff --git a/cobra/topology/reporter_metabolites.py b/cobra/topology/reporter_metabolites.py
index 3b9ec74..ba09a6d 100644
--- a/cobra/topology/reporter_metabolites.py
+++ b/cobra/topology/reporter_metabolites.py
@@ -1,136 +1,9 @@
-# Based on Patil et al 2005 PNAS 102:2685-9
-# TODO: Validate cobra.core compliance
-from __future__ import print_function
-from numpy import array, mean, std, where
-from scipy.stats import norm, randint
-from six import iteritems
+# -*- coding: utf-8 -*-
 
+from __future__ import absolute_import
 
-def identify_reporter_metabolites(cobra_model, reaction_scores_dict,
-                                  number_of_randomizations=1000,
-                                  scoring_metric='default', score_type='p',
-                                  entire_network=False,
-                                  background_correction=True,
-                                  ignore_external_boundary_reactions=False):
-    """Calculate the aggregate Z-score for the metabolites in the model.
-    Ignore reactions that are solely spontaneous or orphan. Allow the scores to
-    have multiple columns / experiments.   This will change the way the output
-    is represented.
+from cobra.exceptions import DefunctError
 
-    cobra_model: A cobra.Model object
 
-    TODO: CHANGE TO USING DICTIONARIES for the_reactions: the_scores
-
-    reaction_scores_dict:  A dictionary where the keys are reactions in
-    cobra_model.reactions and the values are the scores.  Currently, only
-    supports a single numeric value as the value; however, this will be updated
-    to allow for lists
-
-    number_of_randomizations: Integer.  Number of random shuffles of the
-    scores to assess which are significant.
-
-    scoring_metric: default means divide by k**0.5
-
-    score_type: 'p' Is the only option at the moment and indicates p-value.
-
-    entire_network: Boolean. Currently, only compares scores calculated from
-    the_reactions
-
-    background_correction: Boolean.  If True apply background correction to the
-    aggreagate Z-score
-
-    ignore_external_boundary_reactions: Not yet implemented. Boolean.  If True
-    do not count exchange reactions when calculating the score.
-    """
-
-    # Add in a function to calculate based on correlation coefficients and to
-    # deal with other multidimensional data.
-    the_reactions = reaction_scores_dict.keys()
-    the_scores = reaction_scores_dict.values()
-    if score_type == 'p' and not hasattr(the_scores[0], '__iter__'):
-        # minimum and maximum p-values are used to prevent numerical problems.
-        # haven't decided whether an arbitrary min / max 1e-15 is preferred to
-        # blunting the ends based on the values closest to 0 or 1.
-        the_reactions = reaction_scores_dict.keys()
-        the_scores = array(reaction_scores_dict.values())
-        minimum_p = min(the_scores[the_scores.nonzero()[0]])
-        maximum_p = max(the_scores[where(the_scores < 1)[0]])
-        the_scores[where(the_scores < minimum_p)] = minimum_p
-        the_scores[where(the_scores > maximum_p)] = maximum_p
-        the_scores = -norm.ppf(the_scores)
-        # update the dictionary with the new scores
-        reaction_scores_dict = dict(zip(the_reactions, the_scores))
-    elif hasattr(the_scores[0], '__iter__'):
-        # In the case that the_scores is a list of lists, assume that each list
-        # is the score for each reaction in the_reactions across all reactions.
-        # Then for each metabolite, calculate the invnorm(|Pearson Correlation
-        # Coefficient| for each reaction pair that it links.
-        raise Exception("This isn't implemented yet")
-
-    # Get the connectivity for each metabolite
-    the_metabolites = set()
-    for x in reaction_scores_dict:
-        the_metabolites.update(x._metabolites)
-
-    metabolite_scores = {}
-    metabolite_connections = {}
-    # Calculate the score for each metabolite
-    for the_metabolite in the_metabolites:
-        nonspontaneous_connections = [x for x in the_metabolite._reaction
-                                      if x.gene_reaction_rule.lower() not in
-                                      ['s0001', '']]
-        tmp_score = 0
-        number_of_connections = len(nonspontaneous_connections)
-        for the_reaction in nonspontaneous_connections:
-            if the_reaction not in reaction_scores_dict:
-                if not entire_network:
-                    number_of_connections -= 1
-                continue
-            else:
-                tmp_score += reaction_scores_dict[the_reaction]
-        metabolite_scores[the_metabolite] = tmp_score
-        metabolite_connections[the_metabolite] = number_of_connections
-
-    # NOTE: Doing the corrections based only on the significantly perturbed
-    # scores is probably going to underestimate the significance.
-    if background_correction:
-        correction_dict = {}
-        for i in set(metabolite_connections.values()):
-            # if entire_network # add in a section to deal with the situation
-            # where the entire network structure is considered by only have
-            # p-values for a limited subset.
-            #
-            # Basically, what we're doing here is that for each i we select i
-            # scores number_of_randomizations times
-            the_random_indices = randint.rvs(
-                0, len(the_scores), size=(number_of_randomizations, i))
-            random_score_distribution = array(
-                [sum(the_scores[x])
-                 for x in list(the_random_indices)]) / i**0.5
-            correction_dict[i] = [mean(random_score_distribution),
-                                  std(random_score_distribution, ddof=1)]
-
-    for the_metabolite, the_score in iteritems(metabolite_scores):
-        number_of_connections = metabolite_connections[the_metabolite]
-        if number_of_connections > 0:
-            # Correct based on background distribution
-            if background_correction:
-                # if the list of scores is only for significant perturbations
-                # then the background correction shouldn't be applied because
-                # the current sampling method only takes into account
-                # the_scores not the entire network.  It'd be more accurate to
-                # assign unscored reactions a default score.
-                the_score = ((the_score / number_of_connections**.5) -
-                             correction_dict[number_of_connections][0]) / \
-                    correction_dict[number_of_connections][1]
-            else:
-                the_score = the_score / number_of_connections**.5
-            # Update the score
-            metabolite_scores[the_metabolite] = the_score
-
-    return_dictionary = {'scores': metabolite_scores,
-                         'connections': metabolite_connections}
-    if background_correction:
-        return_dictionary['corrections'] = correction_dict
-
-    return return_dictionary
+def identify_reporter_metabolites(*args, **kwargs):
+    raise DefunctError('identify_reporter_metabolites')
diff --git a/cobra/util/__init__.py b/cobra/util/__init__.py
new file mode 100644
index 0000000..ca34443
--- /dev/null
+++ b/cobra/util/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from cobra.util.context import *
+from cobra.util.solver import *
+from cobra.util.util import *
+from cobra.util.array import *
diff --git a/cobra/util/array.py b/cobra/util/array.py
new file mode 100644
index 0000000..3691581
--- /dev/null
+++ b/cobra/util/array.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+from collections import namedtuple
+
+import numpy as np
+import pandas as pd
+from six import iteritems
+
+try:
+    from scipy.sparse import dok_matrix, lil_matrix
+except ImportError:
+    dok_matrix, lil_matrix = None, None
+
+
+def create_stoichiometric_matrix(model, array_type='dense', dtype=None):
+    """Return a stoichiometric array representation of the given model.
+
+    The columns represent the reactions and rows represent
+    metabolites. S[i,j] therefore contains the quantity of metabolite `i`
+    produced (negative for consumed) by reaction `j`.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The cobra model to construct the matrix for.
+    array_type : string
+        The type of array to construct. If 'dense', return a standard
+        numpy.array, 'dok', or 'lil' will construct a sparse array using
+        scipy of the corresponding type and 'DataFrame' will give a
+        pandas `DataFrame` with metabolite indices and reaction columns
+    dtype : data-type
+        The desired data-type for the array. If not given, defaults to float.
+
+    Returns
+    -------
+    matrix of class `dtype`
+        The stoichiometric matrix for the given model.
+    """
+    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
+        raise ValueError('Sparse matrices require scipy')
+
+    if dtype is None:
+        dtype = np.float64
+
+    array_constructor = {
+        'dense': np.zeros, 'dok': dok_matrix, 'lil': lil_matrix,
+        'DataFrame': np.zeros,
+    }
+
+    n_metabolites = len(model.metabolites)
+    n_reactions = len(model.reactions)
+    array = array_constructor[array_type]((n_metabolites, n_reactions),
+                                          dtype=dtype)
+
+    m_ind = model.metabolites.index
+    r_ind = model.reactions.index
+
+    for reaction in model.reactions:
+        for metabolite, stoich in iteritems(reaction.metabolites):
+            array[m_ind(metabolite), r_ind(reaction)] = stoich
+
+    if array_type == 'DataFrame':
+        metabolite_ids = [met.id for met in model.metabolites]
+        reaction_ids = [rxn.id for rxn in model.reactions]
+        return pd.DataFrame(array, index=metabolite_ids, columns=reaction_ids)
+
+    else:
+        return array
+
+
+def nullspace(A, atol=1e-13, rtol=0):
+    """Compute an approximate basis for the nullspace of A.
+    The algorithm used by this function is based on the singular value
+    decomposition of `A`.
+
+    Parameters
+    ----------
+    A : numpy.ndarray
+        A should be at most 2-D.  A 1-D array with length k will be treated
+        as a 2-D with shape (1, k)
+    atol : float
+        The absolute tolerance for a zero singular value.  Singular values
+        smaller than `atol` are considered to be zero.
+    rtol : float
+        The relative tolerance.  Singular values less than rtol*smax are
+        considered to be zero, where smax is the largest singular value.
+
+    If both `atol` and `rtol` are positive, the combined tolerance is the
+    maximum of the two; that is::
+    tol = max(atol, rtol * smax)
+    Singular values smaller than `tol` are considered to be zero.
+
+    Returns
+    -------
+    numpy.ndarray
+        If `A` is an array with shape (m, k), then `ns` will be an array
+        with shape (k, n), where n is the estimated dimension of the
+        nullspace of `A`.  The columns of `ns` are a basis for the
+        nullspace; each element in numpy.dot(A, ns) will be approximately
+        zero.
+
+    Notes
+    -----
+    Taken from the numpy cookbook.
+    """
+    A = np.atleast_2d(A)
+    u, s, vh = np.linalg.svd(A)
+    tol = max(atol, rtol * s[0])
+    nnz = (s >= tol).sum()
+    ns = vh[nnz:].conj().T
+    return ns
+
+
+def constraint_matrices(model, array_type='dense', include_vars=False,
+                        zero_tol=1e-6):
+    """Create a matrix representation of the problem.
+
+    This is used for alternative solution approaches that do not use optlang.
+    The function will construct the equality matrix, inequality matrix and
+    bounds for the complete problem.
+
+    Notes
+    -----
+    To accommodate non-zero equalities the problem will add the variable
+    "const_one" which is a variable that equals one.
+
+    Arguments
+    ---------
+    model : cobra.Model
+        The model from which to obtain the LP problem.
+    array_type : string
+        The type of array to construct. If 'dense', return a standard
+        numpy.array, 'dok', or 'lil' will construct a sparse array using
+        scipy of the corresponding type and 'DataFrame' will give a
+        pandas `DataFrame` with metabolite indices and reaction columns.
+    zero_tol : float
+        The zero tolerance used to judge whether two bounds are the same.
+
+    Returns
+    -------
+    collections.namedtuple
+        A named tuple consisting of 6 matrices and 2 vectors:
+        - "equalities" is a matrix S such that S*vars = b. It includes a row
+          for each constraint and one column for each variable.
+        - "b" the right side of the equality equation such that S*vars = b.
+        - "inequalities" is a matrix M such that lb <= M*vars <= ub.
+          It contains a row for each inequality and as many columns as
+          variables.
+        - "bounds" is a compound matrix [lb ub] containing the lower and
+          upper bounds for the inequality constraints in M.
+        - "variable_fixed" is a boolean vector indicating whether the variable
+          at that index is fixed (lower bound == upper_bound) and
+          is thus bounded by an equality constraint.
+        - "variable_bounds" is a compound matrix [lb ub] containing the
+          lower and upper bounds for all variables.
+    """
+    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
+        raise ValueError('Sparse matrices require scipy')
+
+    array_builder = {
+        'dense': np.array, 'dok': dok_matrix, 'lil': lil_matrix,
+        'DataFrame': pd.DataFrame,
+    }[array_type]
+
+    Problem = namedtuple("Problem",
+                         ["equalities", "b", "inequalities", "bounds",
+                          "variable_fixed", "variable_bounds"])
+    equality_rows = []
+    inequality_rows = []
+    inequality_bounds = []
+    b = []
+
+    for const in model.constraints:
+        lb = -np.inf if const.lb is None else const.lb
+        ub = np.inf if const.ub is None else const.ub
+        equality = (ub - lb) < zero_tol
+        coefs = const.get_linear_coefficients(model.variables)
+        coefs = [coefs[v] for v in model.variables]
+        if equality:
+            b.append(lb if abs(lb) > zero_tol else 0.0)
+            equality_rows.append(coefs)
+        else:
+            inequality_rows.append(coefs)
+            inequality_bounds.append([lb, ub])
+
+    var_bounds = np.array([[v.lb, v.ub] for v in model.variables])
+    fixed = var_bounds[:, 1] - var_bounds[:, 0] < zero_tol
+
+    results = Problem(
+        equalities=array_builder(equality_rows),
+        b=np.array(b),
+        inequalities=array_builder(inequality_rows),
+        bounds=array_builder(inequality_bounds),
+        variable_fixed=np.array(fixed),
+        variable_bounds=array_builder(var_bounds))
+
+    return results
diff --git a/cobra/util/context.py b/cobra/util/context.py
new file mode 100644
index 0000000..a85f0e6
--- /dev/null
+++ b/cobra/util/context.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+from functools import partial
+
+
+class HistoryManager(object):
+    """Record a list of actions to be taken at a later time. Used to
+    implement context managers that allow temporary changes to a
+    :class:`~cobra.core.Model`.
+
+    """
+
+    def __init__(self):
+
+        # self._history just acts as a stack
+        self._history = []
+
+    def __call__(self, operation):
+        """Add the corresponding method to the history stack.
+
+        Parameters
+        ----------
+        operation : `function`
+            A function to be called at a later time
+
+        """
+
+        self._history.append(operation)
+
+    def reset(self):
+        """Trigger executions for all items in the stack in reverse order"""
+        while self._history:
+            entry = self._history.pop()
+            entry()
+
+
+def get_context(obj):
+    """Search for a context manager"""
+    try:
+        return obj._contexts[-1]
+    except (AttributeError, IndexError):
+        pass
+
+    try:
+        return obj._model._contexts[-1]
+    except (AttributeError, IndexError):
+        pass
+
+    return None
+
+
+def resettable(f):
+    """A decorator to simplify the context management of simple object
+    attributes. Gets the value of the attribute prior to setting it, and stores
+    a function to set the value to the old value in the HistoryManager.
+    """
+
+    def wrapper(self, new_value):
+        context = get_context(self)
+        if context:
+            old_value = getattr(self, f.__name__)
+            # Don't clutter the context with unchanged variables
+            if old_value == new_value:
+                return
+            context(partial(f, self, old_value))
+
+        f(self, new_value)
+
+    return wrapper
diff --git a/cobra/util/solver.py b/cobra/util/solver.py
new file mode 100644
index 0000000..2fecd92
--- /dev/null
+++ b/cobra/util/solver.py
@@ -0,0 +1,444 @@
+# -*- coding: utf-8 -*-
+
+"""Additional helper functions for the optlang solvers.
+
+All functions integrate well with the context manager, meaning that
+all operations defined here are automatically reverted when used in a
+`with model:` block.
+
+The functions defined here together with the existing model functions should
+allow you to implement custom flux analysis methods with ease.
+"""
+
+from __future__ import absolute_import
+
+import re
+from functools import partial
+from collections import namedtuple
+from types import ModuleType
+from warnings import warn
+
+import optlang
+import sympy
+
+from cobra.exceptions import OptimizationError, OPTLANG_TO_EXCEPTIONS_DICT
+from cobra.util.context import get_context
+
+
+class SolverNotFound(Exception):
+    """A simple Exception when a solver can not be found."""
+
+    pass
+
+
+# Define all the solvers that are found in optlang.
+solvers = {match.split("_")[0]: getattr(optlang, match)
+           for match in dir(optlang) if "_interface" in match}
+
+# Defines all the QP solvers implemented in optlang.
+qp_solvers = ["cplex"]  # QP in gurobi not implemented yet
+
+
+def linear_reaction_coefficients(model, reactions=None):
+    """Coefficient for the reactions in a linear objective.
+
+    Parameters
+    ----------
+    model : cobra model
+        the model object that defined the objective
+    reactions : list
+        an optional list for the reactions to get the coefficients for. All
+        reactions if left missing.
+
+    Returns
+    -------
+    dict
+        A dictionary where the key is the reaction object and the value is
+        the corresponding coefficient. Empty dictionary if there are no
+        linear terms in the objective.
+    """
+    linear_coefficients = {}
+    reactions = model.reactions if not reactions else reactions
+    try:
+        objective_expression = model.solver.objective.expression
+        coefficients = objective_expression.as_coefficients_dict()
+    except AttributeError:
+        return linear_coefficients
+    for rxn in reactions:
+        forward_coefficient = coefficients.get(rxn.forward_variable, 0)
+        reverse_coefficient = coefficients.get(rxn.reverse_variable, 0)
+        if forward_coefficient != 0:
+            if forward_coefficient == -reverse_coefficient:
+                linear_coefficients[rxn] = float(forward_coefficient)
+    return linear_coefficients
+
+
+def _valid_atoms(model, expression):
+    """Check whether a sympy expression references the correct variables.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model in which to check for variables.
+    expression : sympy.Basic
+        A sympy expression.
+
+    Returns
+    -------
+    boolean
+        True if all referenced variables are contained in model, False
+        otherwise.
+    """
+    atoms = expression.atoms(optlang.interface.Variable)
+    return all(a.problem is model.solver for a in atoms)
+
+
+def set_objective(model, value, additive=False):
+    """Set the model objective.
+
+    Parameters
+    ----------
+    model : cobra model
+       The model to set the objective for
+    value : model.problem.Objective,
+            e.g. optlang.glpk_interface.Objective, sympy.Basic or dict
+
+        If the model objective is linear, the value can be a new Objective
+        object or a dictionary with linear coefficients where each key is a
+        reaction and the element the new coefficient (float).
+
+        If the objective is not linear and `additive` is true, only values
+        of class Objective.
+
+    additive : bool
+        If true, add the terms to the current objective, otherwise start with
+        an empty objective.
+    """
+    interface = model.problem
+    reverse_value = model.solver.objective.expression
+    reverse_value = interface.Objective(
+        reverse_value, direction=model.solver.objective.direction,
+        sloppy=True)
+
+    if isinstance(value, dict):
+        if not model.objective.is_Linear:
+            raise ValueError('can only update non-linear objectives '
+                             'additively using object of class '
+                             'model.problem.Objective, not %s' %
+                             type(value))
+
+        if not additive:
+            model.solver.objective = interface.Objective(
+                sympy.S.Zero, direction=model.solver.objective.direction)
+        for reaction, coef in value.items():
+            model.solver.objective.set_linear_coefficients(
+                {reaction.forward_variable: coef,
+                 reaction.reverse_variable: -coef})
+
+    elif isinstance(value, (sympy.Basic, optlang.interface.Objective)):
+        if isinstance(value, sympy.Basic):
+            value = interface.Objective(
+                value, direction=model.solver.objective.direction,
+                sloppy=False)
+        # Check whether expression only uses variables from current model
+        # clone the objective if not, faster than cloning without checking
+        if not _valid_atoms(model, value.expression):
+            value = interface.Objective.clone(value, model=model.solver)
+
+        if not additive:
+            model.solver.objective = value
+        else:
+            model.solver.objective += value.expression
+    else:
+        raise TypeError(
+            '%r is not a valid objective for %r.' % (value, model.solver))
+
+    context = get_context(model)
+    if context:
+        def reset():
+            model.solver.objective = reverse_value
+            model.solver.objective.direction = reverse_value.direction
+
+        context(reset)
+
+
+def interface_to_str(interface):
+    """Give a string representation for an optlang interface.
+
+    Parameters
+    ----------
+    interface : string, ModuleType
+        Full name of the interface in optlang or cobra representation.
+        For instance 'optlang.glpk_interface' or 'optlang-glpk'.
+
+    Returns
+    -------
+    string
+       The name of the interface as a string
+    """
+    if isinstance(interface, ModuleType):
+        interface = interface.__name__
+    return re.sub(r"optlang.|.interface", "", interface)
+
+
+def get_solver_name(mip=False, qp=False):
+    """Select a solver for a given optimization problem.
+
+    Parameters
+    ----------
+    mip : bool
+        Does the solver require mixed integer linear programming capabilities?
+    qp : bool
+        Does the solver require quadratic programming capabilities?
+
+    Returns
+    -------
+    string
+        The name of a feasible solver.
+
+    Raises
+    ------
+    SolverNotFound
+        If no suitable solver could be found.
+    """
+    if len(solvers) == 0:
+        raise SolverNotFound("no solvers installed")
+    # Those lists need to be updated as optlang implements more solvers
+    mip_order = ["gurobi", "cplex", "glpk"]
+    lp_order = ["glpk", "cplex", "gurobi"]
+    qp_order = ["cplex"]
+
+    if mip is False and qp is False:
+        for solver_name in lp_order:
+            if solver_name in solvers:
+                return solver_name
+        # none of them are in the list order - so return the first one
+        return list(solvers)[0]
+    elif qp:  # mip does not yet matter for this determination
+        for solver_name in qp_order:
+            if solver_name in solvers:
+                return solver_name
+        raise SolverNotFound("no qp-capable solver found")
+    else:
+        for solver_name in mip_order:
+            if solver_name in solvers:
+                return solver_name
+    raise SolverNotFound("no mip-capable solver found")
+
+
+def choose_solver(model, solver=None, qp=False):
+    """Choose a solver given a solver name and model.
+
+    This will choose a solver compatible with the model and required
+    capabilities. Also respects model.solver where it can.
+
+    Parameters
+    ----------
+    model : a cobra model
+        The model for which to choose the solver.
+    solver : str, optional
+        The name of the solver to be used. Optlang solvers should be prefixed
+        by "optlang-", for instance "optlang-glpk".
+    qp : boolean, optional
+        Whether the solver needs Quadratic Programming capabilities.
+
+    Returns
+    -------
+    legacy : boolean
+        Whether the returned solver is a legacy (old cobra solvers) version or
+        an optlang solver (legacy = False).
+    solver : a cobra or optlang solver interface
+        Returns a valid solver for the problem. May be a cobra solver or an
+        optlang interface.
+
+    Raises
+    ------
+    SolverNotFound
+        If no suitable solver could be found.
+    """
+    legacy = False
+    if solver is None:
+        solver = model.problem
+    elif "optlang-" in solver:
+        solver = interface_to_str(solver)
+        solver = solvers[solver]
+    else:
+        legacy = True
+        solver = legacy_solvers.solver_dict[solver]
+
+    # Check for QP, raise error if no QP solver found
+    # optlang only since old interface interprets None differently
+    if qp and interface_to_str(solver) not in qp_solvers:
+        solver = solvers[get_solver_name(qp=True)]
+
+    return legacy, solver
+
+
+def add_cons_vars_to_problem(model, what, **kwargs):
+    """Add variables and constraints to a Model's solver object.
+
+    Useful for variables and constraints that can not be expressed with
+    reactions and lower/upper bounds. Will integrate with the Model's context
+    manager in order to revert changes upon leaving the context.
+
+    Parameters
+    ----------
+    model : a cobra model
+       The model to which to add the variables and constraints.
+    what : list or tuple of optlang variables or constraints.
+       The variables or constraints to add to the model. Must be of class
+       `model.problem.Variable` or
+       `model.problem.Constraint`.
+    **kwargs : keyword arguments
+        passed to solver.add()
+    """
+    context = get_context(model)
+
+    model.solver.add(what, **kwargs)
+    if context:
+        context(partial(model.solver.remove, what))
+
+
+def remove_cons_vars_from_problem(model, what):
+    """Remove variables and constraints from a Model's solver object.
+
+    Useful to temporarily remove variables and constraints from a Model's
+    solver object.
+
+    Parameters
+    ----------
+    model : a cobra model
+       The model from which to remove the variables and constraints.
+    what : list or tuple of optlang variables or constraints.
+       The variables or constraints to remove from the model. Must be of
+       class `model.problem.Variable` or
+       `model.problem.Constraint`.
+    """
+    context = get_context(model)
+
+    model.solver.remove(what)
+    if context:
+        context(partial(model.solver.add, what))
+
+
+def add_absolute_expression(model, expression, name="abs_var", ub=None,
+                            difference=0, add=True):
+    """Add the absolute value of an expression to the model.
+
+    Also defines a variable for the absolute value that can be used in other
+    objectives or constraints.
+
+    Parameters
+    ----------
+    model : a cobra model
+       The model to which to add the absolute expression.
+    expression : A sympy expression
+       Must be a valid expression within the Model's solver object. The
+       absolute value is applied automatically on the expression.
+    name : string
+       The name of the newly created variable.
+    ub : positive float
+       The upper bound for the variable.
+    difference : positive float
+        The difference between the expression and the variable.
+    add : bool
+        Whether to add the variable to the model at once.
+
+    Returns
+    -------
+    namedtuple
+        A named tuple with variable and two constraints (upper_constraint,
+        lower_constraint) describing the new variable and the constraints
+        that assign the absolute value of the expression to it.
+    """
+    Components = namedtuple('Components', ['variable', 'upper_constraint',
+                                           'lower_constraint'])
+    variable = model.problem.Variable(name, lb=0, ub=ub)
+    # The following constraints enforce variable > expression and
+    # variable > -expression
+    upper_constraint = model.problem.Constraint(expression - variable,
+                                                ub=difference,
+                                                name="abs_pos_" + name),
+    lower_constraint = model.problem.Constraint(expression + variable,
+                                                lb=difference,
+                                                name="abs_neg_" + name)
+    to_add = Components(variable, upper_constraint, lower_constraint)
+    if add:
+        add_cons_vars_to_problem(model, to_add)
+    return to_add
+
+
+def fix_objective_as_constraint(model, fraction=1, bound=None,
+                                name='fixed_objective_{}'):
+    """Fix current objective as an additional constraint.
+
+    When adding constraints to a model, such as done in pFBA which
+    minimizes total flux, these constraints can become too powerful,
+    resulting in solutions that satisfy optimality but sacrifices too
+    much for the original objective function. To avoid that, we can fix
+    the current objective value as a constraint to ignore solutions that
+    give a lower (or higher depending on the optimization direction)
+    objective value than the original model.
+
+    When done with the model as a context, the modification to the
+    objective will be reverted when exiting that context.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to operate on
+    fraction : float
+        The fraction of the optimum the objective is allowed to reach.
+    bound : float, None
+        The bound to use instead of fraction of maximum optimal value. If
+        not None, fraction is ignored.
+    name : str
+        Name of the objective. May contain one `{}` placeholder which is filled
+        with the name of the old objective.
+    """
+    fix_objective_name = name.format(model.objective.name)
+    if fix_objective_name in model.constraints:
+        model.solver.remove(fix_objective_name)
+    if bound is None:
+        bound = model.slim_optimize(error_value=None) * fraction
+    if model.objective.direction == 'max':
+        ub, lb = None, bound
+    else:
+        ub, lb = bound, None
+    constraint = model.problem.Constraint(
+        model.objective.expression,
+        name=fix_objective_name, ub=ub, lb=lb)
+    add_cons_vars_to_problem(model, constraint, sloppy=True)
+
+
+def check_solver_status(status, raise_error=False):
+    """Perform standard checks on a solver's status."""
+    if status == optlang.interface.OPTIMAL:
+        return
+    elif status == optlang.interface.INFEASIBLE and not raise_error:
+        warn("solver status is '{}'".format(status), UserWarning)
+    elif status is None:
+        raise RuntimeError(
+            "model was not optimized yet or solver context switched")
+    else:
+        raise OptimizationError("solver status is '{}'".format(status))
+
+
+def assert_optimal(model, message='optimization failed'):
+    """Assert model solver status is optimal.
+
+    Do nothing if model solver status is optimal, otherwise throw
+    appropriate exception depending on the status.
+
+    Parameters
+    ----------
+    model : cobra.Model
+        The model to check the solver status for.
+    message : str (optional)
+        Message for the exception if the solver status was not optimal.
+    """
+    if model.solver.status != optlang.interface.OPTIMAL:
+        raise OPTLANG_TO_EXCEPTIONS_DICT[model.solver.status](message)
+
+
+import cobra.solvers as legacy_solvers  # noqa
diff --git a/cobra/util/util.py b/cobra/util/util.py
new file mode 100644
index 0000000..3aeb377
--- /dev/null
+++ b/cobra/util/util.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+
+class AutoVivification(dict):
+    """Implementation of perl's autovivification feature. Check out
+    http://stackoverflow.com/a/652284/280182 """
+
+    def __getitem__(self, item):
+        try:
+            return dict.__getitem__(self, item)
+        except KeyError:
+            value = self[item] = type(self)()
+            return value
diff --git a/cobra/util/version_info.py b/cobra/util/version_info.py
new file mode 100644
index 0000000..1a7b966
--- /dev/null
+++ b/cobra/util/version_info.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# Adapted from:
+# https://github.com/pandas-dev/pandas/blob/master/pandas/util/_print_versions.py
+# which is published under a BSD license.
+
+from __future__ import absolute_import, print_function
+
+from builtins import dict
+
+import platform
+
+import pip
+
+__all__ = ("show_versions",)
+
+SYS_ORDER = [
+    "OS",
+    "OS-release",
+    "Python"
+]
+PKG_ORDER = [
+    "pip",
+    "setuptools",
+    "cobra",
+    "future",
+    "swiglpk",
+    "optlang",
+    "ruamel.yaml",
+    "pandas",
+    "numpy",
+    "tabulate",
+    "python-libsbml",
+    "lxml",
+    "scipy",
+    "matplotlib",
+    "palettable",
+    "pymatbridge"
+]
+
+
+def get_sys_info():
+    """Returns system information as a dict."""
+    blob = dict()
+    blob["OS"] = platform.system()
+    blob["OS-release"] = platform.release()
+    blob["Python"] = platform.python_version()
+    return blob
+
+
+def get_pkg_info():
+    """Returns Python package information as a dict."""
+    # TODO: Basically copying the requirements from setup.py is brittle,
+    # should come up with a better way in future, for example,
+    # using requirements files that can be read in.
+    dependencies = frozenset(PKG_ORDER)
+    blob = dict()
+    for dist in pip.get_installed_distributions():
+        if dist.project_name in dependencies:
+            blob[dist.project_name] = dist.version
+    return blob
+
+
+def show_versions():
+    """Print the formatted information to standard out."""
+    info = get_sys_info()
+    info.update(get_pkg_info())
+    format_str = "{:<%d} {:>%d}" % (max(map(len, info)),
+                                    max(map(len, info.values())))
+    print("\nSystem Information")
+    print("==================")
+    for name in SYS_ORDER:
+        print(format_str.format(name, info[name]))
+
+    print("\nPackage Versions")
+    print("================")
+    for name in PKG_ORDER:
+        if name in info:
+            print(format_str.format(name, info[name]))
diff --git a/cobra/version.py b/cobra/version.py
deleted file mode 100644
index 14cc2d9..0000000
--- a/cobra/version.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python
-"""Get version identification from git
-
-See the documentation of get_version for more information
-
-"""
-from __future__ import print_function
-
-from subprocess import check_output, CalledProcessError
-from os import path, name, devnull, environ, listdir
-
-__all__ = ("get_version",)
-
-CURRENT_DIRECTORY = path.dirname(path.abspath(__file__))
-VERSION_FILE = path.join(CURRENT_DIRECTORY, "VERSION")
-
-GIT_COMMAND = "git"
-
-if name == "nt":
-    def find_git_on_windows():
-        """find the path to the git executable on windows"""
-        # first see if git is in the path
-        try:
-            check_output(["where", "/Q", "git"])
-            # if this command succeeded, git is in the path
-            return "git"
-        # catch the exception thrown if git was not found
-        except CalledProcessError:
-            pass
-        # There are several locations git.exe may be hiding
-        possible_locations = []
-        # look in program files for msysgit
-        if "PROGRAMFILES(X86)" in environ:
-            possible_locations.append("%s/Git/cmd/git.exe" %
-                                      environ["PROGRAMFILES(X86)"])
-        if "PROGRAMFILES" in environ:
-            possible_locations.append("%s/Git/cmd/git.exe" %
-                                      environ["PROGRAMFILES"])
-        # look for the github version of git
-        if "LOCALAPPDATA" in environ:
-            github_dir = "%s/GitHub" % environ["LOCALAPPDATA"]
-            if path.isdir(github_dir):
-                for subdir in listdir(github_dir):
-                    if not subdir.startswith("PortableGit"):
-                        continue
-                    possible_locations.append("%s/%s/bin/git.exe" %
-                                              (github_dir, subdir))
-        for possible_location in possible_locations:
-            if path.isfile(possible_location):
-                return possible_location
-        # git was not found
-        return "git"
-
-    GIT_COMMAND = find_git_on_windows()
-
-
-def call_git_describe(abbrev=7):
-    """return the string output of git desribe"""
-    try:
-        with open(devnull, "w") as fnull:
-            arguments = [GIT_COMMAND, "describe", "--tags",
-                         "--abbrev=%d" % abbrev]
-            return check_output(arguments, cwd=CURRENT_DIRECTORY,
-                                stderr=fnull).decode("ascii").strip()
-    except (OSError, CalledProcessError):
-        return None
-
-
-def format_git_describe(git_str, pep440=False):
-    """format the result of calling 'git describe' as a python version"""
-    if git_str is None:
-        return None
-    if "-" not in git_str:  # currently at a tag
-        return git_str
-    else:
-        # formatted as version-N-githash
-        # want to convert to version.postN-githash
-        git_str = git_str.replace("-", ".post", 1)
-        if pep440:  # does not allow git hash afterwards
-            return git_str.split("-")[0]
-        else:
-            return git_str.replace("-g", "+git")
-
-
-def read_release_version():
-    """Read version information from VERSION file"""
-    try:
-        with open(VERSION_FILE, "r") as infile:
-            version = str(infile.read().strip())
-        if len(version) == 0:
-            version = None
-        return version
-    except IOError:
-        return None
-
-
-def update_release_version():
-    """Update VERSION file"""
-    version = get_version(pep440=True)
-    with open(VERSION_FILE, "w") as outfile:
-        outfile.write(version)
-        outfile.write("\n")
-
-
-def get_version(pep440=False):
-    """Tracks the version number.
-
-    pep440: bool
-        When True, this function returns a version string suitable for
-        a release as defined by PEP 440. When False, the githash (if
-        available) will be appended to the version string.
-
-    The file VERSION holds the version information. If this is not a git
-    repository, then it is reasonable to assume that the version is not
-    being incremented and the version returned will be the release version as
-    read from the file.
-
-    However, if the script is located within an active git repository,
-    git-describe is used to get the version information.
-
-    The file VERSION will need to be changed by manually. This should be done
-    before running git tag (set to the same as the version in the tag).
-
-    """
-
-    git_version = format_git_describe(call_git_describe(), pep440=pep440)
-    if git_version is None:  # not a git repository
-        return read_release_version()
-    return git_version
-
-
-if __name__ == "__main__":
-    print(get_version())
diff --git a/config.sh b/config.sh
index b855e0a..9a5d12a 100644
--- a/config.sh
+++ b/config.sh
@@ -7,25 +7,34 @@ function pre_build {
     if [ -n "$IS_OSX" ]; then
         export CC=clang
         export CXX=clang++
-		export CFLAGS="-fPIC -O3 -arch i386 -arch x86_64 -g -DNDEBUG -mmacosx-version-min=10.6"
-	else
-		yum install -y libxslt libxml2 libxml2-devel libxslt-devel
-	fi
-	curl -O http://ftp.gnu.org/gnu/glpk/glpk-4.60.tar.gz
-	tar xzf glpk-4.60.tar.gz
-	(cd glpk-4.60 \
-			&& ./configure \
-			&& make \
-			&& make install)
-	pip install cython
-	cython -a cobra/solvers/cglpk.pyx
-	export PATH="$PATH:/usr/local/bin"
+        export CFLAGS="-fPIC -O3 -arch i386 -arch x86_64 -g -DNDEBUG -mmacosx-version-min=10.6"
+    else
+        yum install -y libxslt libxml2 libxml2-devel libxslt-devel
+    fi
+    curl -O http://ftp.gnu.org/gnu/glpk/glpk-4.61.tar.gz
+    tar xzf glpk-4.61.tar.gz
+    (cd glpk-4.61 \
+            && ./configure --disable-reentrant \
+            && make \
+            && make install)
+    pip install cython
+    cython -a cobra/solvers/cglpk.pyx
+    export PATH="$PATH:/usr/local/bin"
 }
 
 function build_wheel {
     # Set default building method to pip
     build_bdist_wheel $@
-	(cd glpk-4.60 && make uninstall)
+    # setup.py sdist fails with
+    # error: [Errno 2] No such file or directory: 'venv/lib/python3.5/_dummy_thread.py'
+    # for python less than 3.5
+    if [[ `python -c 'import sys; print(sys.version.split()[0] >= "3.6.0")'` == "True" ]]; then
+        python setup.py sdist --dist-dir $(abspath ${WHEEL_SDIR:-wheelhouse})
+    else
+        echo "skip sdist"
+    fi
+    # remove glpk installation to ensure using the packaged binaries
+	(cd glpk-4.61 && make uninstall)
 }
 
 function run_tests_in_repo {
@@ -45,11 +54,15 @@ function run_tests_in_repo {
 	fi
 	mkdir -p $HOME/.config/matplotlib
 	echo 'backend: Agg' >> $HOME/.config/matplotlib/matplotlibrc
-	(pytest --pyargs -v -rsx --cov=cobra --cov-report=xml --cov-config=../.coveragerc --benchmark-skip cobra &&
-			mv coverage.xml ..)
+	COVERAGEXML=`python -c "import os,sys; print(os.path.realpath('coverage.xml'))"`
+	COVERAGERC=`python -c "import os,sys; print(os.path.realpath('../.coveragerc'))"`
+	(pytest --pyargs -v -rsx --cov=cobra --cov-report=xml:${COVERAGEXML} \
+			--cov-config=${COVERAGERC} --benchmark-skip cobra &&
+			mv ${COVERAGEXML} ..)
 }
 
 function run_tests {
     # Runs tests on installed distribution from an empty directory
+    pip install python-libsbml==5.12.1 -f https://s3.eu-central-1.amazonaws.com/moonlight-science/wheelhouse/index.html --no-cache-dir
     run_tests_in_repo
 }
diff --git a/develop-requirements.txt b/develop-requirements.txt
index cb2a77b..6e5a340 100644
--- a/develop-requirements.txt
+++ b/develop-requirements.txt
@@ -8,10 +8,13 @@ lxml
 matplotlib
 palettable
 zmq
-pymatbridge
 pandas>=0.17.0
 tabulate
 tox
 pep8
 pytest
+optlang
+swiglpk
 pytest-benchmark
+bumpversion
+pydocstyle
diff --git a/documentation_builder/autodoc.sh b/documentation_builder/autodoc.sh
index 36a46ff..f5a71b3 100755
--- a/documentation_builder/autodoc.sh
+++ b/documentation_builder/autodoc.sh
@@ -1,5 +1,5 @@
 rm cobra.rst cobra.*.rst
-sphinx-apidoc -o . ../cobra ../cobra/oven ../cobra/external \
-    ../cobra/test ../cobra/solvers/ ../cobra/test_all.py \
-    ../cobra/version.py ../cobra/solvers/legacy.py
+sphinx-apidoc -o . ../cobra \
+    ../cobra/test ../cobra/solvers/ \
+    ../cobra/solvers/legacy.py
 rm modules.rst
diff --git a/documentation_builder/building_model.ipynb b/documentation_builder/building_model.ipynb
index 7068791..a284cec 100644
--- a/documentation_builder/building_model.ipynb
+++ b/documentation_builder/building_model.ipynb
@@ -2,10 +2,21 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "# Building a Model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "# Building a Model\n",
-    "\n",
     "This simple example demonstrates how to create a model, create a reaction, and then add the reaction to the model.\n",
     "\n",
     "We'll use the '3OAS140' reaction from the STM_1.0 model:\n",
@@ -19,34 +30,53 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
     "from cobra import Model, Reaction, Metabolite\n",
     "# Best practise: SBML compliant IDs\n",
-    "cobra_model = Model('example_cobra_model')\n",
+    "model = Model('example_model')\n",
     "\n",
     "reaction = Reaction('3OAS140')\n",
     "reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '\n",
     "reaction.subsystem = 'Cell Envelope Biosynthesis'\n",
     "reaction.lower_bound = 0.  # This is the default\n",
-    "reaction.upper_bound = 1000.  # This is the default\n",
-    "reaction.objective_coefficient = 0. # this is the default"
+    "reaction.upper_bound = 1000.  # This is the default"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "We need to create metabolites as well. If we were using an existing model, we could use get_by_id to get the apporpriate Metabolite objects instead."
+    "We need to create metabolites as well. If we were using an existing model, we could use `Model.get_by_id` to get the appropriate Metabolite objects instead."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
@@ -60,21 +90,13 @@
     "    formula='C25H45N2O9PRS',\n",
     "    name='3-Oxotetradecanoyl-acyl-carrier-protein',\n",
     "    compartment='c')\n",
-    "co2_c = Metabolite(\n",
-    "    'co2_c',\n",
-    "    formula='CO2',\n",
-    "    name='CO2',\n",
-    "    compartment='c')\n",
+    "co2_c = Metabolite('co2_c', formula='CO2', name='CO2', compartment='c')\n",
     "malACP_c = Metabolite(\n",
     "    'malACP_c',\n",
     "    formula='C14H22N2O10PRS',\n",
     "    name='Malonyl-acyl-carrier-protein',\n",
     "    compartment='c')\n",
-    "h_c = Metabolite(\n",
-    "    'h_c',\n",
-    "    formula='H',\n",
-    "    name='H',\n",
-    "    compartment='c')\n",
+    "h_c = Metabolite('h_c', formula='H', name='H', compartment='c')\n",
     "ddcaACP_c = Metabolite(\n",
     "    'ddcaACP_c',\n",
     "    formula='C23H43N2O8PRS',\n",
@@ -84,16 +106,21 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "Adding metabolites to a reaction requires using a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -102,44 +129,50 @@
        "'ddcaACP_c + h_c + malACP_c --> 3omrsACP_c + ACP_c + co2_c'"
       ]
      },
-     "execution_count": 3,
+     "execution_count": 4,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "reaction.add_metabolites({malACP_c: -1.0,\n",
-    "                          h_c: -1.0,\n",
-    "                          ddcaACP_c: -1.0,\n",
-    "                          co2_c: 1.0,\n",
-    "                          ACP_c: 1.0,\n",
-    "                          omrsACP_c: 1.0})\n",
-    "\n",
+    "reaction.add_metabolites({\n",
+    "    malACP_c: -1.0,\n",
+    "    h_c: -1.0,\n",
+    "    ddcaACP_c: -1.0,\n",
+    "    co2_c: 1.0,\n",
+    "    ACP_c: 1.0,\n",
+    "    omrsACP_c: 1.0\n",
+    "})\n",
     "\n",
     "reaction.reaction  # This gives a string representation of the reaction"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in [Schellenberger et al 2011 Nature Protocols 6(9):1290-307](http://dx.doi.org/doi:10.1038/nprot.2011.308). We will assign the gene reaction rule string, which will automatically create the corresponding gene objects."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "frozenset({<Gene STM2378 at 0x7fada4592908>, <Gene STM1197 at 0x7fada45927f0>})"
+       "frozenset({<Gene STM1197 at 0x7f2d85786898>, <Gene STM2378 at 0x7f2dc45437f0>})"
       ]
      },
-     "execution_count": 4,
+     "execution_count": 5,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -151,16 +184,21 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "At this point in time, the model is still empty"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -174,23 +212,28 @@
     }
    ],
    "source": [
-    "print('%i reactions initially' % len(cobra_model.reactions))\n",
-    "print('%i metabolites initially' % len(cobra_model.metabolites))\n",
-    "print('%i genes initially' % len(cobra_model.genes))"
+    "print('%i reactions initially' % len(model.reactions))\n",
+    "print('%i metabolites initially' % len(model.metabolites))\n",
+    "print('%i genes initially' % len(model.genes))"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "We will add the reaction to the model, which will also add all associated metabolites and genes"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 7,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -204,26 +247,31 @@
     }
    ],
    "source": [
-    "cobra_model.add_reaction(reaction)\n",
+    "model.add_reactions([reaction])\n",
     "\n",
     "# Now there are things in the model\n",
-    "print('%i reaction' % len(cobra_model.reactions))\n",
-    "print('%i metabolites' % len(cobra_model.metabolites))\n",
-    "print('%i genes' % len(cobra_model.genes))"
+    "print('%i reaction' % len(model.reactions))\n",
+    "print('%i metabolites' % len(model.metabolites))\n",
+    "print('%i genes' % len(model.genes))"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "We can iterate through the model objects to observe the contents"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 8,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -236,17 +284,17 @@
       "\n",
       "Metabolites\n",
       "-----------\n",
-      "3omrsACP_c : C25H45N2O9PRS\n",
-      "ddcaACP_c : C23H43N2O8PRS\n",
-      "    ACP_c : C11H21N2O7PRS\n",
       "    co2_c : CO2\n",
       " malACP_c : C14H22N2O10PRS\n",
       "      h_c : H\n",
+      "3omrsACP_c : C25H45N2O9PRS\n",
+      "ddcaACP_c : C23H43N2O8PRS\n",
+      "    ACP_c : C11H21N2O7PRS\n",
       "\n",
       "Genes\n",
       "-----\n",
-      "STM2378 is associated with reactions: {3OAS140}\n",
-      "STM1197 is associated with reactions: {3OAS140}\n"
+      "STM1197 is associated with reactions: {3OAS140}\n",
+      "STM2378 is associated with reactions: {3OAS140}\n"
      ]
     }
    ],
@@ -254,23 +302,89 @@
     "# Iterate through the the objects in the model\n",
     "print(\"Reactions\")\n",
     "print(\"---------\")\n",
-    "for x in cobra_model.reactions:\n",
+    "for x in model.reactions:\n",
     "    print(\"%s : %s\" % (x.id, x.reaction))\n",
     "\n",
     "print(\"\")\n",
     "print(\"Metabolites\")\n",
     "print(\"-----------\")\n",
-    "for x in cobra_model.metabolites:\n",
+    "for x in model.metabolites:\n",
     "    print('%9s : %s' % (x.id, x.formula))\n",
     "\n",
     "print(\"\")\n",
     "print(\"Genes\")\n",
     "print(\"-----\")\n",
-    "for x in cobra_model.genes:\n",
+    "for x in model.genes:\n",
     "    associated_ids = (i.id for i in x.reactions)\n",
     "    print(\"%s is associated with reactions: %s\" %\n",
     "          (x.id, \"{\" + \", \".join(associated_ids) + \"}\"))"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Last we need to set the objective of the model. Here, we just want this to be the maximization of the flux in the single reaction we added and we do this by assigning the reaction's identifier to the `objective` property of the model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "model.objective = '3OAS140'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "The created objective is a symbolic algebraic expression and we can examine it by printing it"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "-1.0*3OAS140_reverse_65ddc + 1.0*3OAS140\n",
+      "max\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(model.objective.expression)\n",
+    "print(model.objective.direction)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "which here shows that the solver will maximize the flux in the forward direction."
+   ]
   }
  ],
  "metadata": {
@@ -289,7 +403,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/cobra.core.rst b/documentation_builder/cobra.core.rst
index 75b4afd..504ce1f 100644
--- a/documentation_builder/cobra.core.rst
+++ b/documentation_builder/cobra.core.rst
@@ -4,82 +4,82 @@ cobra.core package
 Submodules
 ----------
 
-cobra.core.ArrayBasedModel module
+cobra.core.arraybasedmodel module
 ---------------------------------
 
-.. automodule:: cobra.core.ArrayBasedModel
+.. automodule:: cobra.core.arraybasedmodel
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.DictList module
+cobra.core.dictlist module
 --------------------------
 
-.. automodule:: cobra.core.DictList
+.. automodule:: cobra.core.dictlist
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Formula module
+cobra.core.formula module
 -------------------------
 
-.. automodule:: cobra.core.Formula
+.. automodule:: cobra.core.formula
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Gene module
+cobra.core.gene module
 ----------------------
 
-.. automodule:: cobra.core.Gene
+.. automodule:: cobra.core.gene
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Metabolite module
+cobra.core.metabolite module
 ----------------------------
 
-.. automodule:: cobra.core.Metabolite
+.. automodule:: cobra.core.metabolite
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Model module
+cobra.core.model module
 -----------------------
 
-.. automodule:: cobra.core.Model
+.. automodule:: cobra.core.model
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Object module
+cobra.core.object module
 ------------------------
 
-.. automodule:: cobra.core.Object
+.. automodule:: cobra.core.object
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Reaction module
+cobra.core.reaction module
 --------------------------
 
-.. automodule:: cobra.core.Reaction
+.. automodule:: cobra.core.reaction
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Solution module
+cobra.core.solution module
 --------------------------
 
-.. automodule:: cobra.core.Solution
+.. automodule:: cobra.core.solution
     :members:
     :undoc-members:
     :show-inheritance:
 
-cobra.core.Species module
+cobra.core.species module
 -------------------------
 
-.. automodule:: cobra.core.Species
+.. automodule:: cobra.core.species
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/documentation_builder/cobra.flux_analysis.rst b/documentation_builder/cobra.flux_analysis.rst
index 4763c36..a8bb287 100644
--- a/documentation_builder/cobra.flux_analysis.rst
+++ b/documentation_builder/cobra.flux_analysis.rst
@@ -20,14 +20,6 @@ cobra.flux_analysis.double_deletion module
     :undoc-members:
     :show-inheritance:
 
-cobra.flux_analysis.essentiality module
----------------------------------------
-
-.. automodule:: cobra.flux_analysis.essentiality
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 cobra.flux_analysis.gapfilling module
 -------------------------------------
 
@@ -76,6 +68,14 @@ cobra.flux_analysis.reaction module
     :undoc-members:
     :show-inheritance:
 
+cobra.flux_analysis.sampling module
+-----------------------------------
+
+.. automodule:: cobra.flux_analysis.sampling
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 cobra.flux_analysis.single_deletion module
 ------------------------------------------
 
diff --git a/documentation_builder/cobra.rst b/documentation_builder/cobra.rst
index 494b311..81f032f 100644
--- a/documentation_builder/cobra.rst
+++ b/documentation_builder/cobra.rst
@@ -12,6 +12,27 @@ Subpackages
     cobra.io
     cobra.manipulation
     cobra.topology
+    cobra.util
+
+Submodules
+----------
+
+cobra.config module
+-------------------
+
+.. automodule:: cobra.config
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+cobra.exceptions module
+-----------------------
+
+.. automodule:: cobra.exceptions
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 
 Module contents
 ---------------
diff --git a/documentation_builder/cobra.util.rst b/documentation_builder/cobra.util.rst
new file mode 100644
index 0000000..1b2b49e
--- /dev/null
+++ b/documentation_builder/cobra.util.rst
@@ -0,0 +1,46 @@
+cobra.util package
+==================
+
+Submodules
+----------
+
+cobra.util.array module
+-----------------------
+
+.. automodule:: cobra.util.array
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+cobra.util.context module
+-------------------------
+
+.. automodule:: cobra.util.context
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+cobra.util.solver module
+------------------------
+
+.. automodule:: cobra.util.solver
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+cobra.util.util module
+----------------------
+
+.. automodule:: cobra.util.util
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: cobra.util
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/documentation_builder/conf.py b/documentation_builder/conf.py
index bc80bdf..f695145 100644
--- a/documentation_builder/conf.py
+++ b/documentation_builder/conf.py
@@ -3,7 +3,8 @@
 # cobra documentation build configuration file, created by
 # sphinx-quickstart on Wed Jun 13 19:17:34 2012.
 #
-# This file is execfile()d with the current directory set to its containing dir.
+# This file is execfile()d with the current directory set to its containing
+# dir.
 #
 # Note that not all possible configuration values are present in this
 # autogenerated file.
@@ -35,9 +36,12 @@ class Mock(object):
         else:
             return Mock()
 
-MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'scipy.io', 'scipy.stats',
-                'glpk', 'gurobipy', 'gurobipy.GRB', 'cplex', 'pp', 'libsbml',
-                'cplex.exceptions', 'pandas', 'tabulate']
+
+MOCK_MODULES = ['numpy', 'scipy', 'scipy.optimize', 'scipy.sparse', 'scipy.io',
+                'scipy.stats', 'pp', 'libsbml', 'pandas', 'tabulate',
+                'optlang', 'optlang.interface', 'sympy', 'sympy.core',
+                'sympy.core.singleton', 'future', 'future.utils', 'ruamel',
+                'ruamel.yaml']
 for mod_name in MOCK_MODULES:
     sys.modules[mod_name] = Mock()
 
@@ -54,25 +58,21 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'cobra'
-copyright = u'2016, Daniel Robert Hyduke and Ali Ebrahim'
+copyright = u'2016, The cobrapy core team'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
-# The short X.Y version.
-from cobra.version import get_version, read_release_version
-version = read_release_version()
-# The full version, including alpha/beta/rc tags.
-release = get_version()
+from cobra import __version__ as release
+version = '.'.join(release.split('.')[:2])
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build', 'version.py', '.ipynb_checkpoints']
+exclude_patterns = ['_build', '.ipynb_checkpoints']
 
 pygments_style = 'sphinx'
 
-
 # -- Options for HTML output --------------------------------------------------
 
 mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
@@ -80,21 +80,21 @@ mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-'papersize': 'letterpaper',
+    # The paper size ('letterpaper' or 'a4paper').
+    'papersize': 'letterpaper',
 
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
 
-# Additional stuff for the LaTeX preamble.
-'preamble': r'\usepackage{amsmath,amssymb}',
+    # Additional stuff for the LaTeX preamble.
+    'preamble': r'\usepackage{amsmath,amssymb}',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'cobra.tex', u'cobra Documentation',
-   u'Daniel Robert Hyduke and Ali Ebrahim', 'manual'),
+    ('index', 'cobra.tex', u'cobra Documentation',
+     u'The cobrapy core team', 'manual'),
 ]
 
 # -- Options for manual page output --------------------------------------------
@@ -103,7 +103,7 @@ latex_documents = [
 # (source start file, name, description, authors, manual section).
 man_pages = [
     ('index', 'cobra', u'cobra Documentation',
-     [u'Daniel Robert Hyduke and Ali Ebrahim'], 1)
+     [u'The cobrapy core team'], 1)
 ]
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -112,14 +112,14 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'cobra', u'cobra Documentation',
-   u'Daniel Robert Hyduke and Ali Ebrahim', 'cobra',
-   'A package for constraints-based modeling of biological networks',
-   'Miscellaneous'),
+    ('index', 'cobra', u'cobra Documentation',
+     u'The cobrapy core team', 'cobra',
+     'A package for constraints-based modeling of biological networks',
+     'Miscellaneous'),
 ]
 
 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {"http://docs.python.org/": None,
                        "http://docs.scipy.org/doc/numpy/": None,
                        "http://docs.scipy.org/doc/scipy/reference": None}
-intersphinx_cache_limit = 10     # days to keep the cached inventories
+intersphinx_cache_limit = 10  # days to keep the cached inventories
diff --git a/documentation_builder/constraints_objectives.ipynb b/documentation_builder/constraints_objectives.ipynb
new file mode 100644
index 0000000..443cc30
--- /dev/null
+++ b/documentation_builder/constraints_objectives.ipynb
@@ -0,0 +1,484 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "# Tailored constraints, variables and objectives"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Thanks to the use of symbolic expressions via the optlang mathematical modeling package, it is relatively straight-forward to add new variables, constraints and advanced objectives that can not easily be formulated as a combination of different reaction and their corresponding upper and lower bounds. Here we demonstrate this optlang functionality which is exposed via the `model.solver.interface`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "## Constraints"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Suppose we want to ensure that two reactions have the same flux in our model. We can add this criteria as constraint to our model using the optlang solver interface by simply defining the relevant expression as follows."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "import cobra.test\n",
+    "model = cobra.test.create_test_model('textbook')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "same_flux = model.problem.Constraint(\n",
+    "    model.reactions.FBA.flux_expression - model.reactions.NH4t.flux_expression,\n",
+    "    lb=0,\n",
+    "    ub=0)\n",
+    "model.add_cons_vars(same_flux)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "The flux for our reaction of interest is obtained by the `model.reactions.FBA.flux_expression` which is simply the sum of the forward and reverse flux, i.e.,"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1.0*FBA - 1.0*FBA_reverse_84806"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.reactions.FBA.flux_expression"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Now I can maximize growth rate whilst the fluxes of reactions 'FBA' and 'NH4t' are constrained to be (near) identical."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "4.66274904774 4.66274904774 0.855110960926157\n"
+     ]
+    }
+   ],
+   "source": [
+    "solution = model.optimize()\n",
+    "print(solution.fluxes['FBA'], solution.fluxes['NH4t'],\n",
+    "      solution.objective_value)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "## Objectives\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "A simple objective such as the maximization of the flux through one or more reactions can conveniently be done by simply \n",
+    "assigning to the `model.objective` property as we have seen in previous chapters, e.g.,"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.8739215069684307\n"
+     ]
+    }
+   ],
+   "source": [
+    "model = cobra.test.create_test_model('textbook')\n",
+    "with model:\n",
+    "    model.objective = {model.reactions.Biomass_Ecoli_core: 1}\n",
+    "    model.optimize()\n",
+    "    print(model.reactions.Biomass_Ecoli_core.flux)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "The objective's mathematical expression is seen by"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "-1.0*Biomass_Ecoli_core_reverse_2cdba + 1.0*Biomass_Ecoli_core"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.objective.expression"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "But suppose we need a more complicated objective, such as minimizing the Euclidean distance of the solution to the origin minus another variable, while subject to additional linear constraints. This is an objective function with both linear and quadratic components. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Consider the example problem:\n",
+    "\n",
+    "> **min** $\\frac{1}{2}\\left(x^2 + y^2 \\right) - y$\n",
+    "\n",
+    "> *subject to*\n",
+    "\n",
+    "> $x + y = 2$\n",
+    "\n",
+    "> $x \\ge 0$\n",
+    "\n",
+    "> $y \\ge 0$\n",
+    "\n",
+    "This (admittedly very artificial) problem can be visualized graphically where the optimum is indicated by the blue dot on the line of feasible solutions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDIgMCBSID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9YT2JqZWN0IDcgMCBSIC9Qcm9jU2V0IFsgL1BERiAvVGV4dCAv\nSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdCi9TaGFkaW5nIDYgMCBSIC9Gb250IDMgMCBSIC9QYXR0\nZXJuIDUgMCBSIC9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9Hcm91cCA8\nPCAvQ1MgL0RldmljZVJHQiAvVHlwZSAvR3JvdXAgL1MgL1RyYW5zcGFyZW5jeSA+PgovTWVkaWFC\nb3ggWyAwIDAgMzU2Ljg4ODA2ODE4MTggMjkzLjIgXSAvQ29udGVudHMgOSAwIFIgL0Fubm90cyBb [...]
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWQAAAElCAYAAADTH5jpAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXd4lGW6/z/vTHolCekkhISEEEhBIHQQpYgIiDRBrIuo\na1mP7tk9+9s9u5zdPXu563IUsayigkpTiIKIhh46CYHQ04CENCC9t8nM+/tjnFmqJM9MhoDP57q4\nZJj3KTMx93u/93Pf31tRVRWJRCKR3H40tlhEURRPxc6hUlGUgbZYTyKRSO5EbGKQ0Wg/BbzQ2h+2\nyXoSiURyB9LpBllRFE9FYzfd75E/oBiRXrJEIpHcgM73kDXaTx17xCjO4QNxi5+I4uC0stPXlEgk\nkjsQpbMP9RR7xxYFxUGxc0A1tKHqWkA12Kmqqu/UhSUSieQOw67TV2hrjVIhUG1rATgI9JXGWCKR\nSK6n [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0x7f13a1e5eac8>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "%matplotlib inline\n",
+    "import plot_helper\n",
+    "\n",
+    "plot_helper.plot_qp2()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "We return to the textbook model and set the solver to one that can handle quadratic objectives such as cplex. We then add the linear constraint that the sum of our x and y reactions, that we set to FBA and NH4t, must equal 2."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "model.solver = 'cplex'\n",
+    "sum_two = model.problem.Constraint(\n",
+    "    model.reactions.FBA.flux_expression + model.reactions.NH4t.flux_expression,\n",
+    "    lb=2,\n",
+    "    ub=2)\n",
+    "model.add_cons_vars(sum_two)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Next we add the quadratic objective"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "quadratic_objective = model.problem.Objective(\n",
+    "    0.5 * model.reactions.NH4t.flux_expression**2 + 0.5 *\n",
+    "    model.reactions.FBA.flux_expression**2 -\n",
+    "    model.reactions.FBA.flux_expression,\n",
+    "    direction='min')\n",
+    "model.objective = quadratic_objective\n",
+    "solution = model.optimize(objective_sense=None)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.5 1.5\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(solution.fluxes['NH4t'], solution.fluxes['FBA'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "## Variables"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "We can also create additional variables to facilitate studying the effects of new constraints and variables. Suppose we want to study the difference in flux between nitrogen and carbon uptake whilst we block other reactions. For this it may help to add another variable representing this difference."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "collapsed": true,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "model = cobra.test.create_test_model('textbook')\n",
+    "difference = model.problem.Variable('difference')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "We use constraints to define what values this variable shall take"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "constraint = model.problem.Constraint(\n",
+    "    model.reactions.EX_glc__D_e.flux_expression -\n",
+    "    model.reactions.EX_nh4_e.flux_expression - difference,\n",
+    "    lb=0,\n",
+    "    ub=0)\n",
+    "model.add_cons_vars([difference, constraint])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Now we can access that difference directly during our knock-out exploration by looking at its primal value."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "-5.234680806802543\n",
+      "-5.2346808068025386\n",
+      "-5.234680806802525\n",
+      "-1.8644444444444337\n",
+      "-1.8644444444444466\n"
+     ]
+    }
+   ],
+   "source": [
+    "for reaction in model.reactions[:5]:\n",
+    "    with model:\n",
+    "        reaction.knock_out()\n",
+    "        model.optimize()\n",
+    "        print(model.solver.variables.difference.primal)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/documentation_builder/deletions.ipynb b/documentation_builder/deletions.ipynb
index f426cf0..557dcb7 100644
--- a/documentation_builder/deletions.ipynb
+++ b/documentation_builder/deletions.ipynb
@@ -2,7 +2,10 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "# Simulating Deletions"
    ]
@@ -11,7 +14,9 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
@@ -19,9 +24,9 @@
     "from time import time\n",
     "\n",
     "import cobra.test\n",
-    "from cobra.flux_analysis import \\\n",
-    "    single_gene_deletion, single_reaction_deletion, \\\n",
-    "    double_gene_deletion, double_reaction_deletion\n",
+    "from cobra.flux_analysis import (\n",
+    "    single_gene_deletion, single_reaction_deletion, double_gene_deletion,\n",
+    "    double_reaction_deletion)\n",
     "\n",
     "cobra_model = cobra.test.create_test_model(\"textbook\")\n",
     "ecoli_model = cobra.test.create_test_model(\"ecoli\")"
@@ -29,41 +34,132 @@
   },
   {
    "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "## Knocking out single genes and reactions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "A commonly asked question when analyzing metabolic models is what will happen if a certain reaction was not allowed to have any flux at all. This can be tested using cobrapy by"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "complete model:  <Solution 0.874 at 0x1118cc898>\n",
+      "pfk knocked out:  <Solution 0.704 at 0x1118cc5c0>\n"
+     ]
+    }
+   ],
+   "source": [
+    "print('complete model: ', cobra_model.optimize())\n",
+    "with cobra_model:\n",
+    "    cobra_model.reactions.PFK.knock_out()\n",
+    "    print('pfk knocked out: ', cobra_model.optimize())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "For evaluating genetic manipulation strategies, it is more interesting to examine what happens if given genes are knocked out, as doing so can affect no reactions in case of redundancy, or more reactions if the gene is participating in more than one reaction."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "complete model:  <Solution 0.874 at 0x1108b81d0>\n",
+      "pfkA knocked out:  <Solution 0.874 at 0x1108b80b8>\n",
+      "pfkB knocked out:  <Solution 0.704 at 0x1108b8128>\n"
+     ]
+    }
+   ],
+   "source": [
+    "print('complete model: ', cobra_model.optimize())\n",
+    "with cobra_model:\n",
+    "    cobra_model.genes.b1723.knock_out()\n",
+    "    print('pfkA knocked out: ', cobra_model.optimize())\n",
+    "    cobra_model.genes.b3916.knock_out()\n",
+    "    print('pfkB knocked out: ', cobra_model.optimize())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
     "## Single Deletions"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "Perform all single gene deletions on a model"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 4,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
-    "growth_rates, statuses = single_gene_deletion(cobra_model)"
+    "deletion_results = single_gene_deletion(cobra_model)"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "These can also be done for only a subset of genes"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -74,7 +170,7 @@
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
        "      <th></th>\n",
-       "      <th>growth_rates</th>\n",
+       "      <th>flux</th>\n",
        "      <th>status</th>\n",
        "    </tr>\n",
        "  </thead>\n",
@@ -184,53 +280,55 @@
        "</div>"
       ],
       "text/plain": [
-       "       growth_rates   status\n",
-       "b0116      0.782351  optimal\n",
-       "b0118      0.873922  optimal\n",
-       "b0351      0.873922  optimal\n",
-       "b0356      0.873922  optimal\n",
-       "b0474      0.873922  optimal\n",
-       "b0726      0.858307  optimal\n",
-       "b0727      0.858307  optimal\n",
-       "b1241      0.873922  optimal\n",
-       "b1276      0.873922  optimal\n",
-       "b1478      0.873922  optimal\n",
-       "b1849      0.873922  optimal\n",
-       "b2296      0.873922  optimal\n",
-       "b2587      0.873922  optimal\n",
-       "b3115      0.873922  optimal\n",
-       "b3732      0.374230  optimal\n",
-       "b3733      0.374230  optimal\n",
-       "b3734      0.374230  optimal\n",
-       "b3735      0.374230  optimal\n",
-       "b3736      0.374230  optimal\n",
-       "s0001      0.211141  optimal"
+       "           flux   status\n",
+       "b0116  0.782351  optimal\n",
+       "b0118  0.873922  optimal\n",
+       "b0351  0.873922  optimal\n",
+       "b0356  0.873922  optimal\n",
+       "b0474  0.873922  optimal\n",
+       "b0726  0.858307  optimal\n",
+       "b0727  0.858307  optimal\n",
+       "b1241  0.873922  optimal\n",
+       "b1276  0.873922  optimal\n",
+       "b1478  0.873922  optimal\n",
+       "b1849  0.873922  optimal\n",
+       "b2296  0.873922  optimal\n",
+       "b2587  0.873922  optimal\n",
+       "b3115  0.873922  optimal\n",
+       "b3732  0.374230  optimal\n",
+       "b3733  0.374230  optimal\n",
+       "b3734  0.374230  optimal\n",
+       "b3735  0.374230  optimal\n",
+       "b3736  0.374230  optimal\n",
+       "s0001  0.211141  optimal"
       ]
      },
-     "execution_count": 3,
+     "execution_count": 5,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "gr, st = single_gene_deletion(cobra_model,\n",
-    "                              cobra_model.genes[:20])\n",
-    "pandas.DataFrame.from_dict({\"growth_rates\": gr,\n",
-    "                            \"status\": st})"
+    "single_gene_deletion(cobra_model, cobra_model.genes[:20])"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "This can also be done for reactions"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 6,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -241,109 +339,109 @@
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
        "      <th></th>\n",
-       "      <th>growth_rates</th>\n",
+       "      <th>flux</th>\n",
        "      <th>status</th>\n",
        "    </tr>\n",
        "  </thead>\n",
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>ACALD</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACALDt</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACKr</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTa</th>\n",
-       "      <td>0.0000</td>\n",
+       "      <td>-5.039994e-13</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTb</th>\n",
-       "      <td>0.0000</td>\n",
+       "      <td>-1.477823e-12</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACt2r</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ADK1</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGDH</th>\n",
-       "      <td>0.8583</td>\n",
+       "      <td>8.583074e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGt2r</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ALCD2x</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ATPM</th>\n",
-       "      <td>0.9166</td>\n",
+       "      <td>9.166475e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ATPS4r</th>\n",
-       "      <td>0.3742</td>\n",
+       "      <td>3.742299e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>Biomass_Ecoli_core</th>\n",
-       "      <td>0.0000</td>\n",
+       "      <td>0.000000e+00</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>CO2t</th>\n",
-       "      <td>0.4617</td>\n",
+       "      <td>4.616696e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>CS</th>\n",
-       "      <td>-0.0000</td>\n",
+       "      <td>1.129472e-12</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>CYTBD</th>\n",
-       "      <td>0.2117</td>\n",
+       "      <td>2.116629e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>D_LACt2</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ENO</th>\n",
-       "      <td>-0.0000</td>\n",
+       "      <td>1.161773e-14</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ETOHt2r</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>EX_ac_e</th>\n",
-       "      <td>0.8739</td>\n",
+       "      <td>8.739215e-01</td>\n",
        "      <td>optimal</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
@@ -351,55 +449,65 @@
        "</div>"
       ],
       "text/plain": [
-       "                    growth_rates   status\n",
-       "ACALD                     0.8739  optimal\n",
-       "ACALDt                    0.8739  optimal\n",
-       "ACKr                      0.8739  optimal\n",
-       "ACONTa                    0.0000  optimal\n",
-       "ACONTb                    0.0000  optimal\n",
-       "ACt2r                     0.8739  optimal\n",
-       "ADK1                      0.8739  optimal\n",
-       "AKGDH                     0.8583  optimal\n",
-       "AKGt2r                    0.8739  optimal\n",
-       "ALCD2x                    0.8739  optimal\n",
-       "ATPM                      0.9166  optimal\n",
-       "ATPS4r                    0.3742  optimal\n",
-       "Biomass_Ecoli_core        0.0000  optimal\n",
-       "CO2t                      0.4617  optimal\n",
-       "CS                       -0.0000  optimal\n",
-       "CYTBD                     0.2117  optimal\n",
-       "D_LACt2                   0.8739  optimal\n",
-       "ENO                      -0.0000  optimal\n",
-       "ETOHt2r                   0.8739  optimal\n",
-       "EX_ac_e                   0.8739  optimal"
+       "                            flux   status\n",
+       "ACALD               8.739215e-01  optimal\n",
+       "ACALDt              8.739215e-01  optimal\n",
+       "ACKr                8.739215e-01  optimal\n",
+       "ACONTa             -5.039994e-13  optimal\n",
+       "ACONTb             -1.477823e-12  optimal\n",
+       "ACt2r               8.739215e-01  optimal\n",
+       "ADK1                8.739215e-01  optimal\n",
+       "AKGDH               8.583074e-01  optimal\n",
+       "AKGt2r              8.739215e-01  optimal\n",
+       "ALCD2x              8.739215e-01  optimal\n",
+       "ATPM                9.166475e-01  optimal\n",
+       "ATPS4r              3.742299e-01  optimal\n",
+       "Biomass_Ecoli_core  0.000000e+00  optimal\n",
+       "CO2t                4.616696e-01  optimal\n",
+       "CS                  1.129472e-12  optimal\n",
+       "CYTBD               2.116629e-01  optimal\n",
+       "D_LACt2             8.739215e-01  optimal\n",
+       "ENO                 1.161773e-14  optimal\n",
+       "ETOHt2r             8.739215e-01  optimal\n",
+       "EX_ac_e             8.739215e-01  optimal"
       ]
      },
-     "execution_count": 4,
+     "execution_count": 6,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "gr, st = single_reaction_deletion(cobra_model,\n",
-    "                                  cobra_model.reactions[:20])\n",
-    "pandas.DataFrame.from_dict({\"growth_rates\": gr,\n",
-    "                            \"status\": st}).round(4)"
+    "single_reaction_deletion(cobra_model, cobra_model.reactions[:20])"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "## Double Deletions\n",
-    "\n",
-    "Double deletions run in a similar way. Passing in return_frame=True will cause them to format the results as a pandas Dataframe"
+    "## Double Deletions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Double deletions run in a similar way. Passing in `return_frame=True` will cause them to format the results as a `pandas.DataFrame`."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 7,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -471,51 +579,56 @@
        "b3919  0.7040  0.7040  0.7040  0.7040  0.704"
       ]
      },
-     "execution_count": 5,
+     "execution_count": 7,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "double_gene_deletion(cobra_model, cobra_model.genes[-5:],\n",
-    "                     return_frame=True).round(4)"
+    "double_gene_deletion(\n",
+    "    cobra_model, cobra_model.genes[-5:], return_frame=True).round(4)"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "By default, the double deletion function will automatically use multiprocessing, splitting the task over up to 4 cores if they are available. The number of cores can be manually sepcified as well. Setting use of a single core will disable use of the multiprocessing library, which often aids debuggging."
+    "By default, the double deletion function will automatically use multiprocessing, splitting the task over up to 4 cores if they are available. The number of cores can be manually specified as well. Setting use of a single core will disable use of the multiprocessing library, which often aids debugging."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 8,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Double gene deletions for 200 genes completed in 27.03 sec with 2 cores\n",
-      "Double gene deletions for 200 genes completed in 40.73 sec with 1 core\n",
-      "Speedup of 1.51x\n"
+      "Double gene deletions for 200 genes completed in 33.26 sec with 2 cores\n",
+      "Double gene deletions for 200 genes completed in 45.38 sec with 1 core\n",
+      "Speedup of 1.36x\n"
      ]
     }
    ],
    "source": [
     "start = time()  # start timer()\n",
-    "double_gene_deletion(ecoli_model, ecoli_model.genes[:300],\n",
-    "                     number_of_processes=2)\n",
+    "double_gene_deletion(\n",
+    "    ecoli_model, ecoli_model.genes[:300], number_of_processes=2)\n",
     "t1 = time() - start\n",
     "print(\"Double gene deletions for 200 genes completed in \"\n",
     "      \"%.2f sec with 2 cores\" % t1)\n",
     "\n",
     "start = time()  # start timer()\n",
-    "double_gene_deletion(ecoli_model, ecoli_model.genes[:300],\n",
-    "                     number_of_processes=1)\n",
+    "double_gene_deletion(\n",
+    "    ecoli_model, ecoli_model.genes[:300], number_of_processes=1)\n",
     "t2 = time() - start\n",
     "print(\"Double gene deletions for 200 genes completed in \"\n",
     "      \"%.2f sec with 1 core\" % t2)\n",
@@ -525,16 +638,21 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "Double deletions can also be run for reactions"
+    "Double deletions can also be run for reactions."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 9,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -575,7 +693,7 @@
        "      <td>0.0</td>\n",
        "      <td>0.0</td>\n",
        "      <td>0.0000</td>\n",
-       "      <td>0.0000</td>\n",
+       "      <td>-0.0000</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACt2r</th>\n",
@@ -589,7 +707,7 @@
        "      <th>ADK1</th>\n",
        "      <td>0.8739</td>\n",
        "      <td>0.0</td>\n",
-       "      <td>0.0</td>\n",
+       "      <td>-0.0</td>\n",
        "      <td>0.8739</td>\n",
        "      <td>0.8739</td>\n",
        "    </tr>\n",
@@ -601,20 +719,19 @@
        "          ACKr  ACONTa  ACONTb   ACt2r    ADK1\n",
        "ACKr    0.8739     0.0     0.0  0.8739  0.8739\n",
        "ACONTa  0.0000     0.0     0.0  0.0000  0.0000\n",
-       "ACONTb  0.0000     0.0     0.0  0.0000  0.0000\n",
+       "ACONTb  0.0000     0.0     0.0  0.0000 -0.0000\n",
        "ACt2r   0.8739     0.0     0.0  0.8739  0.8739\n",
-       "ADK1    0.8739     0.0     0.0  0.8739  0.8739"
+       "ADK1    0.8739     0.0    -0.0  0.8739  0.8739"
       ]
      },
-     "execution_count": 7,
+     "execution_count": 9,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "double_reaction_deletion(cobra_model,\n",
-    "                         cobra_model.reactions[2:7],\n",
-    "                         return_frame=True).round(4)"
+    "double_reaction_deletion(\n",
+    "    cobra_model, cobra_model.reactions[2:7], return_frame=True).round(4)"
    ]
   }
  ],
@@ -634,7 +751,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/faq.ipynb b/documentation_builder/faq.ipynb
index 9fdde88..eef1700 100644
--- a/documentation_builder/faq.ipynb
+++ b/documentation_builder/faq.ipynb
@@ -18,7 +18,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I install cobrapy?"
+    "## How do I install cobrapy?"
    ]
   },
   {
@@ -32,7 +32,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I cite cobrapy?"
+    "## How do I cite cobrapy?"
    ]
   },
   {
@@ -46,14 +46,14 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I rename reactions or metabolites?"
+    "## How do I rename reactions or metabolites?"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "TL;DR Use Model.repair afterwards\n",
+    "TL;DR Use `Model.repair` afterwards\n",
     "\n",
     "When renaming metabolites or reactions, there are issues because cobra indexes based off of ID's, which can cause errors. For example:"
    ]
@@ -61,18 +61,8 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "KeyError('test_dcaACP_c',)\n"
-     ]
-    }
-   ],
+   "metadata": {},
+   "outputs": [],
    "source": [
     "from __future__ import print_function\n",
     "import cobra.test\n",
@@ -97,14 +87,30 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Metabolite identifier</strong></td><td>test_dcaACP_c</td>\n",
+       "            </tr>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td><td>Decanoyl-ACP-n-C100ACP</td>\n",
+       "            </tr>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0110f09630</td>\n",
+       "            </tr><tr>\n",
+       "            <tr>\n",
+       "                <td><strong>Formula</strong></td><td>C21H39N2O8PRS</td>\n",
+       "            </tr>\n",
+       "        </table>"
+      ],
       "text/plain": [
-       "<Metabolite test_dcaACP_c at 0x7f90c2b97978>"
+       "<Metabolite test_dcaACP_c at 0x110f09630>"
       ]
      },
      "execution_count": 2,
@@ -121,7 +127,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I delete a gene?"
+    "## How do I delete a gene?"
    ]
   },
   {
@@ -130,15 +136,13 @@
    "source": [
     "That depends on what precisely you mean by delete a gene.\n",
     "\n",
-    "If you want to simulate the model with a gene knockout, use the cobra.maniupulation.delete_model_genes function. The effects of this function are reversed by cobra.manipulation.undelete_model_genes."
+    "If you want to simulate the model with a gene knockout, use the `cobra.manipulation.delete_model_genes` function. The effects of this function are reversed by `cobra.manipulation.undelete_model_genes`."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -161,29 +165,27 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "If you want to actually remove all traces of a gene from a model, this is more difficult because this will require changing all the gene_reaction_rule strings for reactions involving the gene."
+    "If you want to actually remove all traces of a gene from a model, this is more difficult because this will require changing all the `gene_reaction_rule` strings for reactions involving the gene."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I change the reversibility of a Reaction?"
+    "## How do I change the reversibility of a Reaction?"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Reaction.reversibility is a property in cobra which is computed when it is requested from the lower and upper bounds."
+    "`Reaction.reversibility` is a property in cobra which is computed when it is requested from the lower and upper bounds."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -211,15 +213,13 @@
   {
    "cell_type": "code",
    "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "cobra/core/Reaction.py:192 \u001b[1;31mUserWarning\u001b[0m: Setting reaction reversibility is ignored\n"
+      "cobra/core/reaction.py:501 \u001b[1;31mUserWarning\u001b[0m: Setting reaction reversibility is ignored\n"
      ]
     }
    ],
@@ -240,9 +240,7 @@
   {
    "cell_type": "code",
    "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -264,51 +262,66 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### How do I generate an LP file from a COBRA model?"
+    "## How do I generate an LP file from a COBRA model?"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "While the cobrapy does not include python code to support this feature directly, many of the bundled solvers have this capability. Create the problem with one of these solvers, and use its appropriate function.\n",
-    "\n",
-    "Please note that unlike the LP file format, the MPS file format does not specify objective direction and is always a minimzation. Some (but not all) solvers will rewrite the maximization as a minimzation."
+    "### For optlang based solvers"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "With optlang solvers, the LP formulation of a model is obtained by it's string representation. All solvers behave the same way."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 7,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
-   "outputs": [
-    {
-     "ename": "AttributeError",
-     "evalue": "'module' object has no attribute 'gurobi_solver'",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
-      "\u001b[1;32m<ipython-input-7-76bbced9b9d6>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mglp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"test.mps\"\u001b[0m\u001b[1;33m)\u001b[0m  \u001b[1;31m# will not rewrite objective\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      6\u001b[0m \u001b[1;31m# gurobi\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 7 [...]
-      "\u001b[1;31mAttributeError\u001b[0m: 'module' object has no attribute 'gurobi_solver'"
-     ]
-    }
-   ],
+   "outputs": [],
+   "source": [
+    "with open('test.lp', 'w') as out:\n",
+    "    out.write(str(model.solver))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### For cobrapy's internal solvers"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "With the internal solvers, we first create the problem and use functions bundled with the solver. \n",
+    "\n",
+    "Please note that unlike the LP file format, the MPS file format does not specify objective direction and is always a minimization. Some (but not all) solvers will rewrite the maximization as a minimization."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "model = cobra.test.create_test_model()\n",
     "# glpk through cglpk\n",
-    "glp = cobra.solvers.cglpk.create_problem(model)\n",
-    "glp.write(\"test.lp\")\n",
-    "glp.write(\"test.mps\")  # will not rewrite objective\n",
-    "# gurobi\n",
-    "gurobi_problem = cobra.solvers.gurobi_solver.create_problem(model)\n",
-    "gurobi_problem.write(\"test.lp\")\n",
-    "gurobi_problem.write(\"test.mps\")  # rewrites objective\n",
+    "glpk = cobra.solvers.cglpk.create_problem(model)\n",
+    "glpk.write(\"test.lp\")\n",
+    "glpk.write(\"test.mps\")  # will not rewrite objective\n",
     "# cplex\n",
-    "cplex_problem = cobra.solvers.cplex_solver.create_problem(model)\n",
-    "cplex_problem.write(\"test.lp\")\n",
-    "cplex_problem.write(\"test.mps\")  # rewrites objective"
+    "cplex = cobra.solvers.cplex_solver.create_problem(model)\n",
+    "cplex.write(\"test.lp\")\n",
+    "cplex.write(\"test.mps\")  # rewrites objective"
    ]
   },
   {
@@ -342,9 +355,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
diff --git a/documentation_builder/gapfilling.ipynb b/documentation_builder/gapfilling.ipynb
index 4c3ada6..a149f00 100644
--- a/documentation_builder/gapfilling.ipynb
+++ b/documentation_builder/gapfilling.ipynb
@@ -6,25 +6,64 @@
    "source": [
     "# Gapfillling\n",
     "\n",
-    "GrowMatch and SMILEY are gap-filling algorithms, which try to to make the minimal number of changes to a model and allow it to simulate growth. For more information, see [Kumar et al.](http://dx.doi.org/10.1371/journal.pcbi.1000308). Please note that these algorithms are Mixed-Integer Linear Programs, which need solvers such as gurobi or cplex to function correctly."
+    "Model gap filling is the task of figuring out which reactions have to be added to a model to make it feasible. Several such algorithms have been reported e.g. [Kumar et al. 2009](http://dx.doi.org/10.1371/journal.pcbi.1000308) and [Reed et al. 2006](http://www.pnas.org/content/103/46/17480.short). Cobrapy has a gap filling implementation that is very similar to that of Reed et al. where we use a mixed-integer linear program to figure out the smallest number of reactions that need to [...]
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Minimize: $$\\sum_i c_i * z_i$$"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "subject to"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "$$Sv = 0$$\n",
+    "$$v^\\star \\geq t$$\n",
+    "$$l_i\\leq v_i \\leq u_i$$\n",
+    "$$v_i = 0 \\textrm{ if } z_i = 0$$"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Where *l*, *u* are lower and upper bounds for reaction *i* and *z* is an indicator variable that is zero if the reaction is not used and otherwise 1, *c* is a user-defined cost associated with using the *i*th reaction, $v^\\star$ is the flux of the objective and *t* a lower bound for that objective. To demonstrate, let's take a model and remove some essential reactions from it."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
     "import cobra.test\n",
-    "\n",
+    "from cobra.flux_analysis import gapfill\n",
     "model = cobra.test.create_test_model(\"salmonella\")"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "In this model D-Fructose-6-phosphate is an essential metabolite. We will remove all the reactions using it, and at them to a separate model."
    ]
@@ -33,21 +72,25 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {
-    "collapsed": true
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
-    "# remove some reactions and add them to the universal reactions\n",
-    "Universal = cobra.Model(\"Universal_Reactions\")\n",
+    "universal = cobra.Model(\"universal_reactions\")\n",
     "for i in [i.id for i in model.metabolites.f6p_c.reactions]:\n",
     "    reaction = model.reactions.get_by_id(i)\n",
-    "    Universal.add_reaction(reaction.copy())\n",
-    "    reaction.remove_from_model()"
+    "    universal.add_reaction(reaction.copy())\n",
+    "    model.remove_reactions([reaction])"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "Now, because of these gaps, the model won't grow."
    ]
@@ -56,13 +99,15 @@
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "2.821531499799383e-12"
+       "0.0"
       ]
      },
      "execution_count": 3,
@@ -71,23 +116,26 @@
     }
    ],
    "source": [
-    "model.optimize().f"
+    "model.optimize().objective_value"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "## GrowMatch\n",
-    "\n",
-    "We will use GrowMatch to add back the minimal number of reactions from this set of \"universal\" reactions (in this case just the ones we removed) to allow it to grow."
+    "We will use can use the model's original objective, growth, to figure out which of the removed reactions are required for the model be feasible again. This is very similar to making the 'no-growth but growth (NGG)' predictions of [Kumar et al. 2009](http://dx.doi.org/10.1371/journal.pcbi.1000308)."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -95,22 +143,25 @@
      "output_type": "stream",
      "text": [
       "GF6PTA\n",
+      "F6PP\n",
+      "TKT2\n",
       "FBP\n",
-      "MAN6PI_reverse\n",
-      "TKT2_reverse\n",
-      "PGI_reverse\n"
+      "MAN6PI\n"
      ]
     }
    ],
    "source": [
-    "r = cobra.flux_analysis.growMatch(model, Universal)\n",
-    "for e in r[0]:\n",
-    "    print(e.id)"
+    "solution = gapfill(model, universal, demand_reactions=False)\n",
+    "for reaction in solution[0]:\n",
+    "    print(reaction.id)"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "We can obtain multiple possible reaction sets by having the algorithm go through multiple iterations."
    ]
@@ -119,7 +170,9 @@
    "cell_type": "code",
    "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -128,34 +181,33 @@
      "text": [
       "---- Run 1 ----\n",
       "GF6PTA\n",
+      "F6PP\n",
+      "TKT2\n",
       "FBP\n",
-      "MAN6PI_reverse\n",
-      "TKT2_reverse\n",
-      "PGI_reverse\n",
+      "MAN6PI\n",
       "---- Run 2 ----\n",
-      "F6PP\n",
       "GF6PTA\n",
       "TALA\n",
-      "MAN6PI_reverse\n",
-      "F6PA_reverse\n",
+      "PGI\n",
+      "F6PA\n",
+      "MAN6PI\n",
       "---- Run 3 ----\n",
       "GF6PTA\n",
-      "MAN6PI_reverse\n",
-      "TKT2_reverse\n",
-      "F6PA_reverse\n",
-      "PGI_reverse\n",
-      "---- Run 4 ----\n",
       "F6PP\n",
-      "GF6PTA\n",
+      "TKT2\n",
       "FBP\n",
+      "MAN6PI\n",
+      "---- Run 4 ----\n",
+      "GF6PTA\n",
       "TALA\n",
-      "MAN6PI_reverse\n"
+      "PGI\n",
+      "F6PA\n",
+      "MAN6PI\n"
      ]
     }
    ],
    "source": [
-    "result = cobra.flux_analysis.growMatch(model, Universal,\n",
-    "                                       iterations=4)\n",
+    "result = gapfill(model, universal, demand_reactions=False, iterations=4)\n",
     "for i, entries in enumerate(result):\n",
     "    print(\"---- Run %d ----\" % (i + 1))\n",
     "    for e in entries:\n",
@@ -164,37 +216,47 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "## SMILEY\n",
-    "\n",
-    "SMILEY is very similar to growMatch, only instead of setting growth as the objective, it sets production of a specific metabolite"
+    "We can also instead of using the original objective, specify a given metabolite that we want the model to be able to produce."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 6,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "GF6PTA\n",
-      "MAN6PI_reverse\n",
-      "TKT2_reverse\n",
-      "F6PA_reverse\n",
-      "PGI_reverse\n"
+      "FBP\n"
      ]
     }
    ],
    "source": [
-    "r = cobra.flux_analysis.gapfilling.SMILEY(model, \"ac_e\",\n",
-    "                                          Universal)\n",
-    "for e in r[0]:\n",
-    "    print(e.id)"
+    "with model:\n",
+    "    model.objective = model.add_boundary(model.metabolites.f6p_c, type='demand')\n",
+    "    solution = gapfill(model, universal)\n",
+    "    for reaction in solution[0]:\n",
+    "        print(reaction.id)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Finally, note that using mixed-integer linear programming is computationally quite expensive and for larger models you may want to consider alternative [gap filling methods](http://opencobra.github.io/cobrapy/tags/gapfilling/) and [reconstruction methods](http://opencobra.github.io/cobrapy/tags/reconstruction/)."
    ]
   }
  ],
@@ -214,7 +276,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/getting_started.ipynb b/documentation_builder/getting_started.ipynb
index c3b0874..f5ba876 100644
--- a/documentation_builder/getting_started.ipynb
+++ b/documentation_builder/getting_started.ipynb
@@ -4,20 +4,32 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Getting Started\n",
-    "\n",
+    "# Getting Started"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Loading a model and inspecting it"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "To begin with, cobrapy comes with bundled models for _Salmonella_ and _E. coli_, as well as a \"textbook\" model of _E. coli_ core metabolism. To load a test model, type"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from __future__ import print_function\n",
+    "\n",
+    "import cobra\n",
     "import cobra.test\n",
     "\n",
     "# \"ecoli\" and \"salmonella\" are also valid arguments\n",
@@ -28,14 +40,14 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The reactions, metabolites, and genes attributes of the cobrapy model are a special type of list called a DictList, and each one is made up of Reaction, Metabolite and Gene objects respectively."
+    "The reactions, metabolites, and genes attributes of the cobrapy model are a special type of list called a `cobra.DictList`, and each one is made up of `cobra.Reaction`, `cobra.Metabolite` and `cobra.Gene` objects respectively."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {
-    "collapsed": false
+    "scrolled": true
    },
    "outputs": [
     {
@@ -58,20 +70,42 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Just like a regular list, objects in the DictList can be retrived by index. For example, to get the 30th reaction in the model (at index 29 because of [0-indexing](https://en.wikipedia.org/wiki/Zero-based_numbering)):"
+    "When using [Jupyter notebook](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/) this type of information is rendered as a table."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>e_coli_core</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x01116ea9e8</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>72</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>95</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*Biomass_Ecoli_core_reverse_2cdba + 1.0*Biomass_Ecoli_core</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>cytosol, extracellular</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Reaction EX_glu__L_e at 0x7f56b0ea3198>"
+       "<Model e_coli_core at 0x1116ea9e8>"
       ]
      },
      "execution_count": 3,
@@ -80,27 +114,51 @@
     }
    ],
    "source": [
-    "model.reactions[29]"
+    "model"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Addictionally, items can be retrived by their id using the get_by_id() function. For example, to get the cytosolic atp metabolite object (the id is \"atp_c\"), we can do the following:"
+    "Just like a regular list, objects in the `DictList` can be retrieved by index. For example, to get the 30th reaction in the model (at index 29 because of [0-indexing](https://en.wikipedia.org/wiki/Zero-based_numbering)):"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Reaction identifier</strong></td><td>EX_glu__L_e</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td>L-Glutamate exchange</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b8643c8</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Stoichiometry</strong></td>\n",
+       "                <td>\n",
+       "                    <p style='text-align:right'>glu__L_e --> </p>\n",
+       "                    <p style='text-align:right'>L-Glutamate --> </p>\n",
+       "                </td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>GPR</strong></td><td></td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Lower bound</strong></td><td>0.0</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Upper bound</strong></td><td>1000.0</td>\n",
+       "            </tr>\n",
+       "        </table>\n",
+       "        "
+      ],
       "text/plain": [
-       "<Metabolite atp_c at 0x7f56b0ed7cc0>"
+       "<Reaction EX_glu__L_e at 0x11b8643c8>"
       ]
      },
      "execution_count": 4,
@@ -109,27 +167,45 @@
     }
    ],
    "source": [
-    "model.metabolites.get_by_id(\"atp_c\")"
+    "model.reactions[29]"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "As an added bonus, users with an interactive shell such as IPython will be able to tab-complete to list elements inside a list. While this is not recommended behavior for most code because of the possibility for characters like \"-\" inside ids, this is very useful while in an interactive prompt:"
+    "Additionally, items can be retrieved by their `id` using the `DictList.get_by_id()` function. For example, to get the cytosolic atp metabolite object (the id is \"atp_c\"), we can do the following:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Metabolite identifier</strong></td><td>atp_c</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td>ATP</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b7f82b0</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Formula</strong></td><td>C10H12N5O13P3</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartment</strong></td><td>c</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>In 13 reaction(s)</strong></td><td>\n",
+       "                    PYK, GLNS, ATPS4r, SUCOAS, PPCK, GLNabc, ATPM, ACKr, Biomass_Ecoli_core, ADK1, PPS, PFK, PGK</td>\n",
+       "            </tr>\n",
+       "        </table>"
+      ],
       "text/plain": [
-       "-10.0"
+       "<Metabolite atp_c at 0x11b7f82b0>"
       ]
      },
      "execution_count": 5,
@@ -138,7 +214,34 @@
     }
    ],
    "source": [
-    "model.reactions.EX_glc__D_e.lower_bound"
+    "model.metabolites.get_by_id(\"atp_c\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an added bonus, users with an interactive shell such as IPython will be able to tab-complete to list elements inside a list. While this is not recommended behavior for most code because of the possibility for characters like \"-\" inside ids, this is very useful while in an interactive prompt:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(-10.0, 1000.0)"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.reactions.EX_glc__D_e.bounds"
    ]
   },
   {
@@ -157,18 +260,42 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 7,
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Reaction identifier</strong></td><td>PGI</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td>glucose-6-phosphate isomerase</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b886a90</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Stoichiometry</strong></td>\n",
+       "                <td>\n",
+       "                    <p style='text-align:right'>g6p_c <=> f6p_c</p>\n",
+       "                    <p style='text-align:right'>D-Glucose 6-phosphate <=> D-Fructose 6-phosphate</p>\n",
+       "                </td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>GPR</strong></td><td>b4025</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Lower bound</strong></td><td>-1000.0</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Upper bound</strong></td><td>1000.0</td>\n",
+       "            </tr>\n",
+       "        </table>\n",
+       "        "
+      ],
       "text/plain": [
-       "<Reaction PGI at 0x7f56b0e396d8>"
+       "<Reaction PGI at 0x11b886a90>"
       ]
      },
-     "execution_count": 6,
+     "execution_count": 7,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -187,10 +314,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 8,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -210,15 +335,13 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can also view reaction upper and lower bounds. Because the pgi.lower_bound < 0, and pgi.upper_bound > 0, pgi is reversible"
+    "We can also view reaction upper and lower bounds. Because the `pgi.lower_bound` < 0, and `pgi.upper_bound` > 0, `pgi` is reversible."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 9,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -243,10 +366,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 10,
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -254,7 +375,7 @@
        "{}"
       ]
      },
-     "execution_count": 9,
+     "execution_count": 10,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -267,15 +388,13 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In order to add a metabolite, we pass in a dict with the metabolite object and its coefficient"
+    "In order to add a metabolite, we pass in a `dict` with the metabolite object and its coefficient"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 11,
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -283,7 +402,7 @@
        "'g6p_c + h_c <=> f6p_c'"
       ]
      },
-     "execution_count": 10,
+     "execution_count": 11,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -303,9 +422,7 @@
   {
    "cell_type": "code",
    "execution_count": 11,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -332,9 +449,7 @@
   {
    "cell_type": "code",
    "execution_count": 12,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -346,7 +461,7 @@
     }
    ],
    "source": [
-    "pgi.pop(model.metabolites.get_by_id(\"h_c\"))\n",
+    "pgi.subtract_metabolites({model.metabolites.get_by_id(\"h_c\"): -1})\n",
     "print(pgi.reaction)\n",
     "print(pgi.check_mass_balance())"
    ]
@@ -361,9 +476,7 @@
   {
    "cell_type": "code",
    "execution_count": 13,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -381,9 +494,7 @@
   {
    "cell_type": "code",
    "execution_count": 14,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -403,9 +514,7 @@
   {
    "cell_type": "code",
    "execution_count": 15,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -434,20 +543,38 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We will consider cytosolic atp as our metabolite, which has the id atp_c in our test model."
+    "We will consider cytosolic atp as our metabolite, which has the id `\"atp_c\"` in our test model."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 16,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Metabolite identifier</strong></td><td>atp_c</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td>ATP</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b7f82b0</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Formula</strong></td><td>C10H12N5O13P3</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartment</strong></td><td>c</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>In 13 reaction(s)</strong></td><td>\n",
+       "                    PYK, GLNS, ATPS4r, SUCOAS, PPCK, GLNabc, ATPM, ACKr, Biomass_Ecoli_core, ADK1, PPS, PFK, PGK</td>\n",
+       "            </tr>\n",
+       "        </table>"
+      ],
       "text/plain": [
-       "<Metabolite atp_c at 0x7f56b0ed7cc0>"
+       "<Metabolite atp_c at 0x11b7f82b0>"
       ]
      },
      "execution_count": 16,
@@ -464,15 +591,13 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can print out the metabolite name and compartment (cytosol in this case)."
+    "We can print out the metabolite name and compartment (cytosol in this case) directly as string."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 17,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -498,9 +623,7 @@
   {
    "cell_type": "code",
    "execution_count": 18,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -527,9 +650,7 @@
   {
    "cell_type": "code",
    "execution_count": 19,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -547,15 +668,13 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The reactions attribute gives a frozenset of all reactions using the given metabolite. We can use this to count the number of reactions which use atp."
+    "The reactions attribute gives a `frozenset` of all reactions using the given metabolite. We can use this to count the number of reactions which use atp."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 20,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -582,17 +701,15 @@
   {
    "cell_type": "code",
    "execution_count": 21,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "frozenset({<Reaction GLCpts at 0x7f56b0eabcc0>,\n",
-       "           <Reaction Biomass_Ecoli_core at 0x7f56b0e9e358>,\n",
-       "           <Reaction G6PDH2r at 0x7f56b0eab9e8>,\n",
-       "           <Reaction PGI at 0x7f56b0e396d8>})"
+       "frozenset({<Reaction G6PDH2r at 0x11b870c88>,\n",
+       "           <Reaction GLCpts at 0x11b870f98>,\n",
+       "           <Reaction PGI at 0x11b886a90>,\n",
+       "           <Reaction Biomass_Ecoli_core at 0x11b85a5f8>})"
       ]
      },
      "execution_count": 21,
@@ -615,7 +732,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in [Schellenberger et al 2011 Nature Protocols 6(9):1290-307](http://dx.doi.org/doi:10.1038/nprot.2011.308).\n",
+    "The `gene_reaction_rule` is a boolean representation of the gene requirements for this reaction to be active as described in [Schellenberger et al 2011 Nature Protocols 6(9):1290-307](http://dx.doi.org/doi:10.1038/nprot.2011.308).\n",
     "\n",
     "The GPR is stored as the gene_reaction_rule for a Reaction object as a string."
    ]
@@ -623,9 +740,7 @@
   {
    "cell_type": "code",
    "execution_count": 22,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -653,14 +768,12 @@
   {
    "cell_type": "code",
    "execution_count": 23,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "frozenset({<Gene b4025 at 0x7f56b0e8fac8>})"
+       "frozenset({<Gene b4025 at 0x11b844cc0>})"
       ]
      },
      "execution_count": 23,
@@ -675,14 +788,30 @@
   {
    "cell_type": "code",
    "execution_count": 24,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Gene identifier</strong></td><td>b4025</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td>pgi</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b844cc0</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Functional</strong></td><td>True</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>In 1 reaction(s)</strong></td><td>\n",
+       "                    PGI</td>\n",
+       "            </tr>\n",
+       "        </table>"
+      ],
       "text/plain": [
-       "<Gene b4025 at 0x7f56b0e8fac8>"
+       "<Gene b4025 at 0x11b844cc0>"
       ]
      },
      "execution_count": 24,
@@ -705,14 +834,12 @@
   {
    "cell_type": "code",
    "execution_count": 25,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "frozenset({<Reaction PGI at 0x7f56b0e396d8>})"
+       "frozenset({<Reaction PGI at 0x11b886a90>})"
       ]
      },
      "execution_count": 25,
@@ -734,14 +861,12 @@
   {
    "cell_type": "code",
    "execution_count": 26,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "frozenset({<Gene eggs at 0x7f56b0e35ba8>, <Gene spam at 0x7f56b0e390f0>})"
+       "frozenset({<Gene spam at 0x11b850908>, <Gene eggs at 0x11b850eb8>})"
       ]
      },
      "execution_count": 26,
@@ -757,9 +882,7 @@
   {
    "cell_type": "code",
    "execution_count": 27,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
@@ -786,14 +909,30 @@
   {
    "cell_type": "code",
    "execution_count": 28,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Gene identifier</strong></td><td>spam</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Name</strong></td><td></td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x011b850908</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Functional</strong></td><td>True</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>In 1 reaction(s)</strong></td><td>\n",
+       "                    PGI</td>\n",
+       "            </tr>\n",
+       "        </table>"
+      ],
       "text/plain": [
-       "<Gene spam at 0x7f56b0e390f0>"
+       "<Gene spam at 0x11b850908>"
       ]
      },
      "execution_count": 28,
@@ -809,15 +948,13 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The delete_model_genes function will evaluate the gpr and set the upper and lower bounds to 0 if the reaction is knocked out. This function can preserve existing deletions or reset them using the cumulative_deletions flag."
+    "The `delete_model_genes` function will evaluate the GPR and set the upper and lower bounds to 0 if the reaction is knocked out. This function can preserve existing deletions or reset them using the `cumulative_deletions` flag."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 29,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -829,15 +966,13 @@
     }
    ],
    "source": [
-    "cobra.manipulation.delete_model_genes(model, [\"spam\"],\n",
-    "                                      cumulative_deletions=True)\n",
-    "print(\"after 1 KO: %4d < flux_PGI < %4d\" %\n",
-    "      (pgi.lower_bound, pgi.upper_bound))\n",
+    "cobra.manipulation.delete_model_genes(\n",
+    "    model, [\"spam\"], cumulative_deletions=True)\n",
+    "print(\"after 1 KO: %4d < flux_PGI < %4d\" % (pgi.lower_bound, pgi.upper_bound))\n",
     "\n",
-    "cobra.manipulation.delete_model_genes(model, [\"eggs\"],\n",
-    "                                      cumulative_deletions=True)\n",
-    "print(\"after 2 KO:  %4d < flux_PGI < %4d\" %\n",
-    "      (pgi.lower_bound, pgi.upper_bound))"
+    "cobra.manipulation.delete_model_genes(\n",
+    "    model, [\"eggs\"], cumulative_deletions=True)\n",
+    "print(\"after 2 KO:  %4d < flux_PGI < %4d\" % (pgi.lower_bound, pgi.upper_bound))"
    ]
   },
   {
@@ -850,9 +985,7 @@
   {
    "cell_type": "code",
    "execution_count": 30,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -866,6 +999,141 @@
     "cobra.manipulation.undelete_model_genes(model)\n",
     "print(pgi.lower_bound, \"< pgi <\", pgi.upper_bound)"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Making changes reversibly using models as contexts"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Quite often, one wants to make small changes to a model and evaluate the impacts of these. For example, we may want to knock-out all reactions sequentially, and see what the impact of this is on the objective function. One way of doing this would be to create a new copy of the model before each knock-out with `model.copy()`. However, even with small models, this is a very slow approach as models are quite complex objects. Better then would be to do the knock-out, optimizing and then [...]
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "ACALD blocked (bounds: (0, 0)), new growth rate 0.873922\n",
+      "ACALDt blocked (bounds: (0, 0)), new growth rate 0.873922\n",
+      "ACKr blocked (bounds: (0, 0)), new growth rate 0.873922\n",
+      "ACONTa blocked (bounds: (0, 0)), new growth rate -0.000000\n",
+      "ACONTb blocked (bounds: (0, 0)), new growth rate -0.000000\n"
+     ]
+    }
+   ],
+   "source": [
+    "model = cobra.test.create_test_model('textbook')\n",
+    "for reaction in model.reactions[:5]:\n",
+    "    with model as model:\n",
+    "        reaction.knock_out()\n",
+    "        model.optimize()\n",
+    "        print('%s blocked (bounds: %s), new growth rate %f' %\n",
+    "              (reaction.id, str(reaction.bounds), model.objective.value))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we look at those knocked reactions, see that their bounds have all been reverted."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(-1000.0, 1000.0),\n",
+       " (-1000.0, 1000.0),\n",
+       " (-1000.0, 1000.0),\n",
+       " (-1000.0, 1000.0),\n",
+       " (-1000.0, 1000.0)]"
+      ]
+     },
+     "execution_count": 32,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "[reaction.bounds for reaction in model.reactions[:5]]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Nested contexts are also supported"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "original objective:  -1.0*Biomass_Ecoli_core_reverse_2cdba + 1.0*Biomass_Ecoli_core\n",
+      "print objective in first context: -1.0*ATPM_reverse_5b752 + 1.0*ATPM\n",
+      "print objective in second context: 1.0*ACALD - 1.0*ACALD_reverse_fda2b\n",
+      "objective after exiting second context: -1.0*ATPM_reverse_5b752 + 1.0*ATPM\n",
+      "back to original objective: -1.0*Biomass_Ecoli_core_reverse_2cdba + 1.0*Biomass_Ecoli_core\n"
+     ]
+    }
+   ],
+   "source": [
+    "print('original objective: ', model.objective.expression)\n",
+    "with model:\n",
+    "    model.objective = 'ATPM'\n",
+    "    print('print objective in first context:', model.objective.expression)\n",
+    "    with model:\n",
+    "        model.objective = 'ACALD'\n",
+    "        print('print objective in second context:', model.objective.expression)\n",
+    "    print('objective after exiting second context:',\n",
+    "          model.objective.expression)\n",
+    "print('back to original objective:', model.objective.expression)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Most methods that modify the model are supported like this including adding and removing reactions and metabolites and setting the objective. Supported methods and functions mention this in the corresponding documentation."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "While it does not have any actual effect, for syntactic convenience it is also possible to refer to the model by a different name than outside the context. Such as"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "with model as inner:\n",
+    "    inner.reactions.PFK.knock_out"
+   ]
   }
  ],
  "metadata": {
@@ -884,9 +1152,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
diff --git a/documentation_builder/index.rst b/documentation_builder/index.rst
index 40523f9..27c8df2 100644
--- a/documentation_builder/index.rst
+++ b/documentation_builder/index.rst
@@ -17,11 +17,11 @@ be viewed at `nbviewer
     simulating
     deletions
     phenotype_phase_plane
-    milp
-    qp
+    sampling
     loopless
     gapfilling
     solvers
+    constraints_objectives
     pymatbridge
     faq
     cobra
diff --git a/documentation_builder/io.ipynb b/documentation_builder/io.ipynb
index 697d1f5..c88bb89 100644
--- a/documentation_builder/io.ipynb
+++ b/documentation_builder/io.ipynb
@@ -4,9 +4,14 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Reading and Writing Models\n",
-    "\n",
-    "Cobrapy supports reading and writing models in SBML (with and without FBC), JSON, MAT, and pickle formats. Generally, SBML with FBC version 2 is the preferred format for general use. The JSON format may be more useful for cobrapy-specific functionality.\n",
+    "# Reading and Writing Models"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Cobrapy supports reading and writing models in SBML (with and without FBC), JSON, YAML, MAT, and pickle formats. Generally, SBML with FBC version 2 is the preferred format for general use. The JSON format may be more useful for cobrapy-specific functionality.\n",
     "\n",
     "The package also ships with test models in various formats for testing purposes."
    ]
@@ -14,16 +19,14 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
       "mini test files: \n",
-      "mini.mat, mini_cobra.xml, mini.json, mini_fbc2.xml.gz, mini_fbc2.xml.bz2, mini_fbc2.xml, mini_fbc1.xml, mini.pickle\n"
+      "mini.json, mini.mat, mini.pickle, mini.yml, mini_cobra.xml, mini_fbc1.xml, mini_fbc2.xml, mini_fbc2.xml.bz2, mini_fbc2.xml.gz\n"
      ]
     }
    ],
@@ -32,13 +35,10 @@
     "import os\n",
     "from os.path import join\n",
     "\n",
-    "data_dir = cobra.test.data_directory\n",
+    "data_dir = cobra.test.data_dir\n",
     "\n",
     "print(\"mini test files: \")\n",
-    "print(\", \".join(i for i in os.listdir(data_dir)\n",
-    "                if i.startswith(\"mini\")))\n",
-    "\n",
-    "\n",
+    "print(\", \".join(i for i in os.listdir(data_dir) if i.startswith(\"mini\")))\n",
     "\n",
     "textbook_model = cobra.test.create_test_model(\"textbook\")\n",
     "ecoli_model = cobra.test.create_test_model(\"ecoli\")\n",
@@ -64,14 +64,36 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x01074fd080</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>cytosol, extracellular</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Model mini_textbook at 0x7fa5e44d1a58>"
+       "<Model mini_textbook at 0x1074fd080>"
       ]
      },
      "execution_count": 2,
@@ -87,7 +109,7 @@
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -100,20 +122,44 @@
    "source": [
     "There are other dialects of SBML prior to FBC 2 which have previously been use to encode COBRA models. The primary ones is the \"COBRA\" dialect which used the \"notes\" fields in SBML files.\n",
     "\n",
-    "Cobrapy can use [libsbml](http://sbml.org/Software/libSBML), which must be installed separately (see installation instructions) to read and write these files. When reading in a model, it will automatically detect whether fbc was used or not. When writing a model, the use_fbc_package flag can be used can be used to write files in this legacy \"cobra\" format."
+    "Cobrapy can use [libsbml](http://sbml.org/Software/libSBML), which must be installed separately (see installation instructions) to read and write these files. When reading in a model, it will automatically detect whether FBC was used or not. When writing a model, the use_fbc_package flag can be used can be used to write files in this legacy \"cobra\" format.\n",
+    "\n",
+    "Consider having the [lxml](http://lxml.de/) package installed as it can speed up parsing considerably."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0112fa6b38</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>cytosol, extracellular</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Model mini_textbook at 0x7fa5ba4d12e8>"
+       "<Model mini_textbook at 0x112fa6b38>"
       ]
      },
      "execution_count": 4,
@@ -129,34 +175,61 @@
    "cell_type": "code",
    "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
-    "cobra.io.write_sbml_model(textbook_model, \"test_cobra.xml\",\n",
-    "                          use_fbc_package=False)"
+    "cobra.io.write_sbml_model(\n",
+    "    textbook_model, \"test_cobra.xml\", use_fbc_package=False)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## JSON\n",
-    "\n",
-    "cobrapy models have a [JSON](https://en.wikipedia.org/wiki/JSON) (JavaScript Object Notation) representation. This format was crated for interoperability with [escher](https://escher.github.io)."
+    "## JSON"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Cobrapy models have a [JSON](https://en.wikipedia.org/wiki/JSON) (JavaScript Object Notation) representation. This format was created for interoperability with [escher](https://escher.github.io)."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0113061080</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>cytosol, extracellular</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Model mini_textbook at 0x7fa5ba4a3128>"
+       "<Model mini_textbook at 0x113061080>"
       ]
      },
      "execution_count": 6,
@@ -183,62 +256,175 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## MATLAB"
+    "## YAML"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Often, models may be imported and exported soley for the purposes of working with the same models in cobrapy and the [MATLAB cobra toolbox](http://opencobra.github.io/cobratoolbox/). MATLAB has its own \".mat\" format for storing variables. Reading and writing to these mat files from python requires scipy.\n",
-    "\n",
-    "A mat file can contain multiple MATLAB variables. Therefore, the variable name of the model in the MATLAB file can be passed into the reading function:"
+    "Cobrapy models have a [YAML](https://en.wikipedia.org/wiki/YAML) (YAML Ain't Markup Language) representation. This format was created for more human readable model representations and automatic diffs between models."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0113013390</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>extracellular, cytosol</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
+      "text/plain": [
+       "<Model mini_textbook at 0x113013390>"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.io.load_yaml_model(join(data_dir, \"mini.yml\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
+   "outputs": [],
+   "source": [
+    "cobra.io.save_yaml_model(textbook_model, \"test.yml\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## MATLAB"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Often, models may be imported and exported solely for the purposes of working with the same models in cobrapy and the [MATLAB cobra toolbox](http://opencobra.github.io/cobratoolbox/). MATLAB has its own \".mat\" format for storing variables. Reading and writing to these mat files from python requires scipy.\n",
+    "\n",
+    "A mat file can contain multiple MATLAB variables. Therefore, the variable name of the model in the MATLAB file can be passed into the reading function:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0113000b70</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>c, e</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Model mini_textbook at 0x7fa5ba483198>"
+       "<Model mini_textbook at 0x113000b70>"
       ]
      },
-     "execution_count": 8,
+     "execution_count": 10,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "cobra.io.load_matlab_model(join(data_dir, \"mini.mat\"),\n",
-    "                           variable_name=\"mini_textbook\")"
+    "cobra.io.load_matlab_model(\n",
+    "    join(data_dir, \"mini.mat\"), variable_name=\"mini_textbook\")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "If the mat file contains only a single model, cobra can figure out which variable to read from, and the variable_name paramter is unnecessary."
+    "If the mat file contains only a single model, cobra can figure out which variable to read from, and the variable_name parameter is unnecessary."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 11,
+   "metadata": {},
    "outputs": [
     {
      "data": {
+      "text/html": [
+       "\n",
+       "        <table>\n",
+       "            <tr>\n",
+       "                <td><strong>Name</strong></td>\n",
+       "                <td>mini_textbook</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Memory address</strong></td>\n",
+       "                <td>0x0113758438</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of metabolites</strong></td>\n",
+       "                <td>23</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Number of reactions</strong></td>\n",
+       "                <td>18</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Objective expression</strong></td>\n",
+       "                <td>-1.0*ATPM_reverse_5b752 - 1.0*PFK_reverse_d24a6 + 1.0*PFK + 1.0*ATPM</td>\n",
+       "            </tr><tr>\n",
+       "                <td><strong>Compartments</strong></td>\n",
+       "                <td>c, e</td>\n",
+       "            </tr>\n",
+       "          </table>"
+      ],
       "text/plain": [
-       "<Model mini_textbook at 0x7fa5ba4a3f28>"
+       "<Model mini_textbook at 0x113758438>"
       ]
      },
-     "execution_count": 9,
+     "execution_count": 11,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -256,9 +442,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 12,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -298,9 +484,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
diff --git a/documentation_builder/loopless.ipynb b/documentation_builder/loopless.ipynb
index 30e6a6d..68ae44c 100644
--- a/documentation_builder/loopless.ipynb
+++ b/documentation_builder/loopless.ipynb
@@ -2,60 +2,31 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "# Loopless FBA\n",
-    "\n",
-    "The goal of this procedure is identification of a thermodynamically consistent flux state without loops, as implied by the name.\n",
-    "\n",
-    "Usually, the model has the following constraints.\n",
-    "$$ S \\cdot v = 0 $$\n",
-    "$$ lb \\le v \\le ub $$\n",
-    "\n",
-    "However, this will allow for thermodynamically infeasible loops (referred to as type 3 loops) to occur, where flux flows around a cycle without any net change of metabolites. For most cases, this is not a major issue, as solutions with these loops can usually be converted to equivalent solutions without them. However, if a flux state is desired which does not exhibit any of these loops, loopless FBA can be used. The formulation used here is modified from [Schellenberger et al.](http [...]
-    "\n",
-    "We can make the model irreversible, so that all reactions will satisfy\n",
-    "$$ 0 \\le lb \\le v \\le ub \\le \\max(ub) $$\n",
-    "\n",
-    "We will add in boolean indicators as well, such that\n",
-    "$$ \\max(ub) \\cdot i \\ge v $$\n",
-    "$$ i \\in \\{0, 1\\} $$\n",
-    "\n",
-    "We also want to ensure that an entry in the row space of S also exists with negative values wherever v is nonzero. In this expression, $1-i$ acts as a not to indicate inactivity of a reaction.\n",
-    "\n",
-    "$$ S^\\mathsf T x - (1 - i) (\\max(ub) + 1) \\le -1 $$\n",
-    "\n",
-    "We will construct an LP integrating both constraints.\n",
-    "\n",
-    "$$ \\left(\n",
-    "\\begin{matrix}\n",
-    "S & 0 & 0\\\\\n",
-    "-I & \\max(ub)I & 0 \\\\\n",
-    "0 & (\\max(ub) + 1)I & S^\\mathsf T\n",
-    "\\end{matrix}\n",
-    "\\right)\n",
-    "\\cdot\n",
-    "\\left(\n",
-    "\\begin{matrix}\n",
-    "v \\\\\n",
-    "i \\\\\n",
-    "x\n",
-    "\\end{matrix}\n",
-    "\\right)\n",
-    "\\begin{matrix}\n",
-    "&=& 0 \\\\\n",
-    "&\\ge& 0 \\\\\n",
-    "&\\le& \\max(ub)\n",
-    "\\end{matrix}$$\n",
-    "\n",
-    "Note that these extra constraints are not applied to boundary reactions which bring metabolites in and out of the system."
+    "# Loopless FBA"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "The goal of this procedure is identification of a thermodynamically consistent flux state without loops, as implied by the name. You can find a more detailed description in the [method](#Method) section at the end of the notebook."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
@@ -64,31 +35,147 @@
     "\n",
     "import cobra.test\n",
     "from cobra import Reaction, Metabolite, Model\n",
-    "from cobra.flux_analysis.loopless import construct_loopless_model\n",
-    "from cobra.flux_analysis import optimize_minimal_flux\n",
-    "from cobra.solvers import get_solver_name"
+    "from cobra.flux_analysis.loopless import add_loopless, loopless_solution\n",
+    "from cobra.flux_analysis import pfba"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "We will demonstrate with a toy model which has a simple loop cycling A -> B -> C -> A, with A allowed to enter the system and C allowed to leave. A graphical view of the system is drawn below:"
+    "## Loopless solution"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Classical loopless approaches as described below are computationally expensive to solve due to the added mixed-integer constraints. A much faster, and pragmatic approach is instead to post-process flux distributions to simply set fluxes to zero wherever they can be zero without changing the fluxes of any exchange reactions in the model. [CycleFreeFlux](http://dx.doi.org/10.1093/bioinformatics/btv096) is an algorithm that can be used to achieve this and in cobrapy it is implemented i [...]
+    "\n",
+    "Using a larger model than the simple example above, this can be demonstrated as follows"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "salmonella = cobra.test.create_test_model('salmonella')\n",
+    "nominal = salmonella.optimize()\n",
+    "loopless = loopless_solution(salmonella)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "import pandas\n",
+    "df = pandas.DataFrame(dict(loopless=loopless.fluxes, nominal=nominal.fluxes))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.axes._subplots.AxesSubplot at 0x10f7cb3c8>"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Qcm9jU2V0IFsgL1BERiAvVGV4dCAv\nSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdCi9Gb250IDMgMCBSIC9QYXR0ZXJuIDUgMCBSIC9YT2Jq\nZWN0IDcgMCBSIC9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgMzk0LjQ2NTYyNSAyNjUuNjgz\nNzUgXQovR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAvR3Jv [...]
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEKCAYAAAAMzhLIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAHCVJREFUeJzt3XF4VPWd7/H3d0II1FChIaWYgKBButhi6p1rpVAeq7ba\nbi+4xVJv18XutsvjPtpte+8W9LrXbu36bMV93L3dduvyWHfBZWut1IWrthVZrLUr0mhDBBSNcpVQ\nDJiCEoUhyXzvH3MmTMLkZIKZOWeSz+t55sk5v3Nm8tUn5JPf+f3O75i7IyIiMpBE1AWIiEi8KShE\nRCSUgkJEREIpKEREJJSCQkREQikoREQklIJCRERCKShERCSUgkJEREKNibqA4TB58mSfMWNG1GWI\niJSVp59++nV3rx3svBERFDNmzKCpqSnqMkREyoqZvVLIebr0JCIioRQUIiISSkEhIiKhFBQiIhJK\nQSEi [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0x103f18198>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "df.plot.scatter(x='loopless', y='nominal')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "This functionality can also be used in FVA by using the `loopless=True` argument to avoid getting high flux ranges for reactions that essentially only can reach high fluxes if they are allowed to participate in loops (see the simulation notebook) leading to much narrower flux ranges."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "## Loopless model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Cobrapy also includes the \"classical\" loopless formulation by [Schellenberger et. al.](https://dx.doi.org/10.1016%2Fj.bpj.2010.12.3707) implemented in `cobra.flux_analysis.add_loopless`, which modifies the model with additional mixed-integer constraints that make thermodynamically infeasible loops impossible. This is much slower than the strategy provided above and should only be used if one of the two following cases applies:\n",
+    "\n",
+    "1. You want to combine a non-linear (e.g. quadratic) objective with the loopless condition\n",
+    "2. You want to force the model to be infeasible in the presence of loops independent of the set reaction bounds.\n",
+    "\n",
+    "We will demonstrate this with a toy model which has a simple loop cycling A $\\rightarrow$ B $\\rightarrow$ C $\\rightarrow$ A, with A allowed to enter the system and C allowed to leave. A graphical view of the system is drawn below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true,
+    "scrolled": true
    },
    "outputs": [
     {
      "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDIgMCBSID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9QYXR0ZXJuIDUgMCBSIC9TaGFkaW5nIDYgMCBSCi9Qcm9jU2V0\nIFsgL1BERiAvVGV4dCAvSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdIC9YT2JqZWN0IDcgMCBSIC9G\nb250IDMgMCBSCi9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9UeXBlIC9Q\nYWdlIC9Hcm91cCA8PCAvVHlwZSAvR3JvdXAgL1MgL1RyYW5zcGFyZW5jeSAvQ1MgL0RldmljZVJH\nQiA+PgovUGFyZW50IDIgMCBSIC9Db250ZW50cyA5IDAgUiAvUmVzb3VyY2VzIDggMCBSIC9Bbm5v [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnQAAAEcCAYAAABOJillAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xe4JFWd//H3dwJMIo0iCEiQsLpG1B3BBCtmAUHSGhhZ\nVBQHI6YVFzBhzmDARaIRRAXUURGVIEEFFFFBJYiAyA9wgAnAzHx/f5y6zuVy8+3u6up+v56nn7rd\nXdX1vTNd3Z97TtU5kZlIkiSpuabVXYAkSZKmxkAnSZLUcAY6SZKkhjPQSZIkNZyBTpIkqeEMdJIk\nSQ1noJMkSWo4A50kSVLDGegkSZIazkAnSZLUcAY6SZKkhjPQSZIkNZyBTpIkqeEMdJIkSQ1noJMk\nSWo4A50kSVLDGegkSZIazkAnSZLUcAY6SZKkhjPQSZIkNZyBTpIkqeEMdJIkSQ1noJMkSWo4A50k\nSVLD [...]
+      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Qcm9jU2V0IFsgL1BERiAvVGV4dCAv\nSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdCi9Gb250IDMgMCBSIC9QYXR0ZXJuIDUgMCBSIC9YT2Jq\nZWN0IDcgMCBSIC9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgNjMxLjE1OTM3NSAyNzkuNDg4\nNzUgXQovR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAvR3Jv [...]
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAncAAAEYCAYAAAA+gNBwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xe4JFWd//H3dyIzg0gYgggighFXBXeHAVTMCQYQBSPJ\ntArqKhgxgIpiAnXXwUV/KsFIGhBdUddVRIIoGFBWAVFZVAQJIjCJme/vjzrNXC433+6u7ur363nu\nUx2qur4Dt7s/95w650RmIkmSpGaYUXcBkiRJah/DnSRJUoMY7iRJkhrEcCdJktQghjtJkqQGMdxJ\nkiQ1iOFOkiSpQQx3kiRJDWK4kyRJahDDnSRJUoMY7iRJkhrEcCdJktQghjtJkqQGMdxJkiQ1iOFO\nkiSpQQx3kiRJDWK4kyRJahDDnSRJUoMY7iRJkhrEcCdJktQghjtJkqQGMdxJkiQ1iOFOkiSpQQx3\nkiRJ [...]
       "text/plain": [
-       "<matplotlib.figure.Figure at 0x7f92b021c908>"
+       "<matplotlib.figure.Figure at 0x104bd52e8>"
       ]
      },
      "metadata": {},
@@ -101,38 +188,45 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 6,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
-    "test_model = Model()\n",
-    "test_model.add_metabolites([Metabolite(i) for i in \"ABC\"])\n",
-    "test_model.add_reactions([Reaction(i) for i in\n",
-    "                          [\"EX_A\", \"DM_C\", \"v1\", \"v2\", \"v3\"]])\n",
+    "model = Model()\n",
+    "model.add_metabolites([Metabolite(i) for i in \"ABC\"])\n",
+    "model.add_reactions([Reaction(i) for i in [\"EX_A\", \"DM_C\", \"v1\", \"v2\", \"v3\"]])\n",
+    "\n",
+    "model.reactions.EX_A.add_metabolites({\"A\": 1})\n",
+    "model.reactions.DM_C.add_metabolites({\"C\": -1})\n",
     "\n",
-    "test_model.reactions.EX_A.add_metabolites({\"A\": 1})\n",
-    "test_model.reactions.DM_C.add_metabolites({\"C\": -1})\n",
-    "test_model.reactions.DM_C.objective_coefficient = 1\n",
+    "model.reactions.v1.add_metabolites({\"A\": -1, \"B\": 1})\n",
+    "model.reactions.v2.add_metabolites({\"B\": -1, \"C\": 1})\n",
+    "model.reactions.v3.add_metabolites({\"C\": -1, \"A\": 1})\n",
     "\n",
-    "test_model.reactions.v1.add_metabolites({\"A\": -1, \"B\": 1})\n",
-    "test_model.reactions.v2.add_metabolites({\"B\": -1, \"C\": 1})\n",
-    "test_model.reactions.v3.add_metabolites({\"C\": -1, \"A\": 1})"
+    "model.objective = 'DM_C'"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "While this model contains a loop, a flux state exists which has no flux through reaction v3, and is identified by loopless FBA."
+    "While this model contains a loop, a flux state exists which has no flux through reaction v$_3$, and is identified by loopless FBA."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 7,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -140,28 +234,35 @@
      "output_type": "stream",
      "text": [
       "loopless solution: status = optimal\n",
-      "loopless solution: v3 = 0.0\n"
+      "loopless solution flux: v3 = 0.0\n"
      ]
     }
    ],
    "source": [
-    "solution = construct_loopless_model(test_model).optimize()\n",
+    "with model:\n",
+    "    add_loopless(model)\n",
+    "    solution = model.optimize()\n",
     "print(\"loopless solution: status = \" + solution.status)\n",
-    "print(\"loopless solution: v3 = %.1f\" % solution.x_dict[\"v3\"])"
+    "print(\"loopless solution flux: v3 = %.1f\" % solution.fluxes[\"v3\"])"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "If there there is no forced flux through a loopless reaction, parsimonious FBA will also have no flux through the loop."
+    "If there is no forced flux through a loopless reaction, parsimonious FBA will also have no flux through the loop."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 8,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
@@ -169,137 +270,125 @@
      "output_type": "stream",
      "text": [
       "parsimonious solution: status = optimal\n",
-      "parsimonious solution: v3 = 0.0\n"
+      "loopless solution flux: v3 = 0.0\n"
      ]
     }
    ],
    "source": [
-    "solution = optimize_minimal_flux(test_model)\n",
+    "solution = pfba(model)\n",
     "print(\"parsimonious solution: status = \" + solution.status)\n",
-    "print(\"parsimonious solution: v3 = %.1f\" % solution.x_dict[\"v3\"])"
+    "print(\"loopless solution flux: v3 = %.1f\" % solution.fluxes[\"v3\"])"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "However, if flux is forced through v3, then there is no longer a feasible loopless solution, but the parsimonious solution will still exist."
+    "However, if flux is forced through v$_3$, then there is no longer a feasible loopless solution, but the parsimonious solution will still exist."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 9,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "loopless solution: status = infeasible\n"
+      "model is infeasible\n"
      ]
-    }
-   ],
-   "source": [
-    "test_model.reactions.v3.lower_bound = 1\n",
-    "solution = construct_loopless_model(test_model).optimize()\n",
-    "print(\"loopless solution: status = \" + solution.status)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
+    },
     {
-     "name": "stdout",
+     "name": "stderr",
      "output_type": "stream",
      "text": [
-      "parsimonious solution: status = optimal\n",
-      "parsimonious solution: v3 = 1.0\n"
+      "cobra/util/solver.py:398 \u001b[1;31mUserWarning\u001b[0m: solver status is 'infeasible'\n"
      ]
     }
    ],
    "source": [
-    "solution = optimize_minimal_flux(test_model)\n",
-    "print(\"parsimonious solution: status = \" + solution.status)\n",
-    "print(\"parsimonious solution: v3 = %.1f\" % solution.x_dict[\"v3\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Loopless FBA is also possible on genome scale models, but it requires a capable MILP solver. If one is installed, cobrapy can detect it automatically using the get_solver_name function"
+    "model.reactions.v3.lower_bound = 1\n",
+    "with model:\n",
+    "    add_loopless(model)\n",
+    "    try:\n",
+    "        solution = model.optimize()\n",
+    "    except:\n",
+    "        print('model is infeasible')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 10,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "cplex\n"
+      "parsimonious solution: status = optimal\n",
+      "loopless solution flux: v3 = 1.0\n"
      ]
     }
    ],
    "source": [
-    "mip_solver = get_solver_name(mip=True)\n",
-    "print(mip_solver)"
+    "solution = pfba(model)\n",
+    "print(\"parsimonious solution: status = \" + solution.status)\n",
+    "print(\"loopless solution flux: v3 = %.1f\" % solution.fluxes[\"v3\"])"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 9,
+   "cell_type": "markdown",
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<Solution 0.38 at 0x7f9285d7ffd0>"
-      ]
-     },
-     "execution_count": 9,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
    "source": [
-    "salmonella = cobra.test.create_test_model(\"salmonella\")\n",
-    "construct_loopless_model(salmonella).optimize(solver=mip_solver)"
+    "## Method"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 10,
+   "cell_type": "markdown",
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<Solution 0.98 at 0x7f9285c89470>"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
    "source": [
-    "ecoli = cobra.test.create_test_model(\"ecoli\")\n",
-    "construct_loopless_model(ecoli).optimize(solver=mip_solver)"
+    "`loopless_solution` is based on a given reference flux distribution. It will look for a new flux distribution with the following requirements:\n",
+    "\n",
+    "1. The objective value is the same as in the reference fluxes.\n",
+    "2. All exchange fluxes have the same value as in the reference distribution.\n",
+    "3. All non-exchange fluxes have the same sign (flow in the same direction) as the reference fluxes.\n",
+    "4. The sum of absolute non-exchange fluxes is minimized.\n",
+    "\n",
+    "As proven in the [original publication](http://dx.doi.org/10.1093/bioinformatics/btv096) this will identify the \"least-loopy\" solution closest to the reference fluxes.\n",
+    "\n",
+    "If you are using `add_loopless` this will use the method [described here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3030201/). In summary, it will add $G \\approx \\Delta G$ proxy variables and make loops thermodynamically infeasible. This is achieved by the following formulation.\n",
+    "\n",
+    "$$\n",
+    "\\begin{eqnarray}\n",
+    "&\\text{maximize } v_{obj} \\\\\n",
+    "s.t. & Sv = 0\\\\\n",
+    "& lb_j \\leq v_j \\leq ub_j \\\\\n",
+    "& -M\\cdot (1 - a_i) \\leq v_i \\leq M\\cdot a_i\\\\\n",
+    "& -1000a_i + (1 - a_i) \\leq G_i \\leq -a_i + 1000(1 - a_i)\\\\\n",
+    "& N_{int}G = 0\\\\\n",
+    "& a_i \\in \\{0, 1\\}\n",
+    "\\end{eqnarray}\n",
+    "$$\n",
+    "\n",
+    "Here the index j runs over all reactions and the index i only over internal ones. $a_i$ are indicator variables which equal one if the reaction flux flows in the forward direction and 0 otherwise. They are used to force the G proxies to always carry the opposite sign of the flux (as is the case for the \"real\" $\\Delta G$ values). $N_{int}$ is the nullspace matrix for internal reactions and is used to find thermodynamically \"correct\" values for G. \n"
    ]
   }
  ],
@@ -319,7 +408,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/milp.ipynb b/documentation_builder/milp.ipynb
deleted file mode 100644
index 7e92e1f..0000000
--- a/documentation_builder/milp.ipynb
+++ /dev/null
@@ -1,423 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Mixed-Integer Linear Programming\n",
-    "\n",
-    "## Ice Cream\n",
-    "\n",
-    "This example was originally contributed by Joshua Lerman.\n",
-    "\n",
-    "An ice cream stand sells cones and popsicles. It wants to maximize its profit, but is subject to a budget.\n",
-    "\n",
-    "We can write this problem as a linear program:\n",
-    "\n",
-    "> **max** cone $\\cdot$ cone_margin + popsicle $\\cdot$ popsicle margin\n",
-    "\n",
-    "> *subject to*\n",
-    "\n",
-    "> cone $\\cdot$ cone_cost + popsicle $\\cdot$ popsicle_cost $\\le$ budget"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "cone_selling_price = 7.\n",
-    "cone_production_cost = 3.\n",
-    "popsicle_selling_price = 2.\n",
-    "popsicle_production_cost = 1.\n",
-    "starting_budget = 100."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This problem can be written as a cobra.Model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'cone': 33.333333333333336, 'popsicle': 0.0}"
-      ]
-     },
-     "execution_count": 2,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "from cobra import Model, Metabolite, Reaction\n",
-    "\n",
-    "cone = Reaction(\"cone\")\n",
-    "popsicle = Reaction(\"popsicle\")\n",
-    "\n",
-    "# constrainted to a budget\n",
-    "budget = Metabolite(\"budget\")\n",
-    "budget._constraint_sense = \"L\"\n",
-    "budget._bound = starting_budget\n",
-    "cone.add_metabolites({budget: cone_production_cost})\n",
-    "popsicle.add_metabolites({budget: popsicle_production_cost})\n",
-    "\n",
-    "# objective coefficient is the profit to be made from each unit\n",
-    "cone.objective_coefficient = \\\n",
-    "    cone_selling_price - cone_production_cost\n",
-    "popsicle.objective_coefficient = \\\n",
-    "    popsicle_selling_price - popsicle_production_cost\n",
-    "\n",
-    "m = Model(\"lerman_ice_cream_co\")\n",
-    "m.add_reactions((cone, popsicle))\n",
-    "\n",
-    "m.optimize().x_dict"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In reality, cones and popsicles can only be sold in integer amounts. We can use the variable kind attribute of a cobra.Reaction to enforce this."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'cone': 33.0, 'popsicle': 1.0}"
-      ]
-     },
-     "execution_count": 3,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "cone.variable_kind = \"integer\"\n",
-    "popsicle.variable_kind = \"integer\"\n",
-    "m.optimize().x_dict"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now the model makes both popsicles and cones."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Restaurant Order\n",
-    "\n",
-    "To tackle the less immediately obvious problem from the following [XKCD comic](http://xkcd.com/287/):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "<img src=\"http://imgs.xkcd.com/comics/np_complete.png\"/>"
-      ],
-      "text/plain": [
-       "<IPython.core.display.Image object>"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "from IPython.display import Image\n",
-    "Image(url=r\"http://imgs.xkcd.com/comics/np_complete.png\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We want a solution satisfying the following constraints:\n",
-    "\n",
-    "$\\left(\\begin{matrix}2.15&2.75&3.35&3.55&4.20&5.80\\end{matrix}\\right) \\cdot \\vec v = 15.05$\n",
-    "\n",
-    "$\\vec v_i \\ge 0$\n",
-    "\n",
-    "$\\vec v_i \\in \\mathbb{Z}$\n",
-    "\n",
-    "This problem can be written as a COBRA model as well."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'french_fries': 0.0,\n",
-       " 'hot_wings': 2.0,\n",
-       " 'mixed_fruit': 1.0,\n",
-       " 'mozarella_sticks': 0.0,\n",
-       " 'sampler_plate': 1.0,\n",
-       " 'side_salad': 0.0}"
-      ]
-     },
-     "execution_count": 5,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "total_cost = Metabolite(\"constraint\")\n",
-    "total_cost._bound = 15.05\n",
-    "\n",
-    "costs = {\"mixed_fruit\": 2.15, \"french_fries\": 2.75,\n",
-    "         \"side_salad\": 3.35, \"hot_wings\": 3.55,\n",
-    "         \"mozarella_sticks\": 4.20, \"sampler_plate\": 5.80}\n",
-    "\n",
-    "m = Model(\"appetizers\")\n",
-    "\n",
-    "for item, cost in costs.items():\n",
-    "    r = Reaction(item)\n",
-    "    r.add_metabolites({total_cost: cost})\n",
-    "    r.variable_kind = \"integer\"\n",
-    "    m.add_reaction(r)\n",
-    "\n",
-    "# To add to the problem, suppose we want to\n",
-    "# eat as little mixed fruit as possible.\n",
-    "m.reactions.mixed_fruit.objective_coefficient = 1\n",
-    "    \n",
-    "m.optimize(objective_sense=\"minimize\").x_dict"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There is another solution to this problem, which would have been obtained if we had maximized for mixed fruit instead of minimizing."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'french_fries': 0.0,\n",
-       " 'hot_wings': 0.0,\n",
-       " 'mixed_fruit': 7.0,\n",
-       " 'mozarella_sticks': 0.0,\n",
-       " 'sampler_plate': 0.0,\n",
-       " 'side_salad': 0.0}"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "m.optimize(objective_sense=\"maximize\").x_dict"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Boolean Indicators\n",
-    "\n",
-    "To give a COBRA-related example, we can create boolean variables as integers, which can serve as indicators for a reaction being active in a model. For a reaction flux $v$ with lower bound -1000 and upper bound 1000, we can create a binary variable $b$ with the following constraints:\n",
-    "\n",
-    "$b \\in \\{0, 1\\}$\n",
-    "\n",
-    "$-1000 \\cdot b \\le v \\le 1000 \\cdot b$\n",
-    "\n",
-    "To introduce the above constraints into a cobra model, we can rewrite them as follows\n",
-    "\n",
-    "$v \\le b \\cdot 1000 \\Rightarrow v- 1000\\cdot b \\le 0$\n",
-    "\n",
-    "$-1000 \\cdot b \\le v \\Rightarrow v + 1000\\cdot b \\ge 0$"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import cobra.test\n",
-    "model = cobra.test.create_test_model(\"textbook\")\n",
-    "\n",
-    "# an indicator for pgi\n",
-    "pgi = model.reactions.get_by_id(\"PGI\")\n",
-    "# make a boolean variable\n",
-    "pgi_indicator = Reaction(\"indicator_PGI\")\n",
-    "pgi_indicator.lower_bound = 0\n",
-    "pgi_indicator.upper_bound = 1\n",
-    "pgi_indicator.variable_kind = \"integer\"\n",
-    "# create constraint for v - 1000 b <= 0\n",
-    "pgi_plus = Metabolite(\"PGI_plus\")\n",
-    "pgi_plus._constraint_sense = \"L\"\n",
-    "# create constraint for v + 1000 b >= 0\n",
-    "pgi_minus = Metabolite(\"PGI_minus\")\n",
-    "pgi_minus._constraint_sense = \"G\"\n",
-    "\n",
-    "pgi_indicator.add_metabolites({pgi_plus: -1000,\n",
-    "                               pgi_minus: 1000})\n",
-    "pgi.add_metabolites({pgi_plus: 1, pgi_minus: 1})\n",
-    "model.add_reaction(pgi_indicator)\n",
-    "\n",
-    "\n",
-    "# an indicator for zwf\n",
-    "zwf = model.reactions.get_by_id(\"G6PDH2r\")\n",
-    "zwf_indicator = Reaction(\"indicator_ZWF\")\n",
-    "zwf_indicator.lower_bound = 0\n",
-    "zwf_indicator.upper_bound = 1\n",
-    "zwf_indicator.variable_kind = \"integer\"\n",
-    "# create constraint for v - 1000 b <= 0\n",
-    "zwf_plus = Metabolite(\"ZWF_plus\")\n",
-    "zwf_plus._constraint_sense = \"L\"\n",
-    "# create constraint for v + 1000 b >= 0\n",
-    "zwf_minus = Metabolite(\"ZWF_minus\")\n",
-    "zwf_minus._constraint_sense = \"G\"\n",
-    "\n",
-    "zwf_indicator.add_metabolites({zwf_plus: -1000,\n",
-    "                               zwf_minus: 1000})\n",
-    "zwf.add_metabolites({zwf_plus: 1, zwf_minus: 1})\n",
-    "\n",
-    "# add the indicator reactions to the model\n",
-    "model.add_reaction(zwf_indicator)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In a model with both these reactions active, the indicators will also be active"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "PGI indicator = 1\n",
-      "ZWF indicator = 1\n",
-      "PGI flux = 4.86\n",
-      "ZWF flux = 4.96\n"
-     ]
-    }
-   ],
-   "source": [
-    "solution = model.optimize()\n",
-    "print(\"PGI indicator = %d\" % solution.x_dict[\"indicator_PGI\"])\n",
-    "print(\"ZWF indicator = %d\" % solution.x_dict[\"indicator_ZWF\"])\n",
-    "print(\"PGI flux = %.2f\" % solution.x_dict[\"PGI\"])\n",
-    "print(\"ZWF flux = %.2f\" % solution.x_dict[\"G6PDH2r\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Because these boolean indicators are in the model, additional constraints can be applied on them. For example, we can prevent both reactions from being active at the same time by adding the following constraint:\n",
-    "\n",
-    "$b_\\text{pgi} + b_\\text{zwf} = 1$"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "PGI indicator = 1\n",
-      "ZWF indicator = 0\n",
-      "PGI flux = 9.82\n",
-      "ZWF flux = 0.00\n"
-     ]
-    }
-   ],
-   "source": [
-    "or_constraint = Metabolite(\"or\")\n",
-    "or_constraint._bound = 1\n",
-    "zwf_indicator.add_metabolites({or_constraint: 1})\n",
-    "pgi_indicator.add_metabolites({or_constraint: 1})\n",
-    "\n",
-    "solution = model.optimize()\n",
-    "print(\"PGI indicator = %d\" % solution.x_dict[\"indicator_PGI\"])\n",
-    "print(\"ZWF indicator = %d\" % solution.x_dict[\"indicator_ZWF\"])\n",
-    "print(\"PGI flux = %.2f\" % solution.x_dict[\"PGI\"])\n",
-    "print(\"ZWF flux = %.2f\" % solution.x_dict[\"G6PDH2r\"])"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.4.3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/documentation_builder/phenotype_phase_plane.ipynb b/documentation_builder/phenotype_phase_plane.ipynb
index 0b16484..6b7497f 100644
--- a/documentation_builder/phenotype_phase_plane.ipynb
+++ b/documentation_builder/phenotype_phase_plane.ipynb
@@ -4,36 +4,43 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Phenotype Phase Plane\n",
-    "\n",
-    "Phenotype phase planes will show distinct phases of optimal growth with different use of two different substrates. For more information, see [Edwards et al.](http://dx.doi.org/10.1002/bit.10047)\n",
+    "# Production envelopes"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
+   "source": [
+    "Production envelopes (aka phenotype phase planes) will show distinct phases of optimal growth with different use of two different substrates. For more information, see [Edwards et al.](http://dx.doi.org/10.1002/bit.10047)\n",
     "\n",
-    "Cobrapy supports calculating and plotting (using [matplotlib](http://matplotlib.org)) these phenotype phase planes. Here, we will make one for the \"textbook\" _E. coli_ core model."
+    "Cobrapy supports calculating these production envelopes and they can easily be plotted using your favorite plotting package. Here, we will make one for the \"textbook\" _E. coli_ core model and demonstrate plotting using [matplotlib](http://matplotlib.org/)."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
-    "%matplotlib inline\n",
-    "from IPython.display import set_matplotlib_formats\n",
-    "set_matplotlib_formats('png', 'pdf')\n",
-    "\n",
-    "from time import time\n",
-    "\n",
     "import cobra.test\n",
-    "from cobra.flux_analysis import calculate_phenotype_phase_plane\n",
+    "from cobra.flux_analysis import production_envelope\n",
     "\n",
     "model = cobra.test.create_test_model(\"textbook\")"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
     "We want to make a phenotype phase plane to evaluate uptakes of Glucose and Oxygen."
    ]
@@ -42,89 +49,248 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "prod_env = production_envelope(model, [\"EX_glc__D_e\", \"EX_o2_e\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Gb250IDMgMCBSCi9Qcm9jU2V0IFsg\nL1BERiAvVGV4dCAvSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdIC9YT2JqZWN0IDcgMCBSCi9FeHRH\nU3RhdGUgNCAwIFIgL1BhdHRlcm4gNSAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgNDI0LjggMjgwLjggXQovQW5u\nb3RzIFsgXSAvR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAv [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWmQHGWa5/nzK+77yMhUSiAJJG5xFqe4xS0omqKoqt6e\nGqsqq55em+21bltr6w/zabp7ra1tP3bbdFv37K7tTM10F3VzSAIhDiEkQIBACCFA6EBSKjMy7giP\nCL/3g6cHoahMHanMVEr4zwxTogzF6+7h8T7+f9/nef6C4zj4+Pj4+PgsNsRzfQA+Pj4+Pj7T4Qco\nHx8fH59FiR+gfHx8fHwWJX6A8vHx8fFZlPgBysfHx8dnUSKf4vd+ip+Pj4+Pz3wjTPeXvoLy8fHx\n8VmU+AHKx8fHx2dR4gcoHx8fH59FiR+gfHx8fHwWJX6A8vHx8fFZlPgBysfHx8dnUeIHKB8fHx+f\nRYkf [...]
+      "text/html": [
+       "<div>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>EX_glc__D_e</th>\n",
+       "      <th>EX_o2_e</th>\n",
+       "      <th>direction</th>\n",
+       "      <th>flux</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>-10.0</td>\n",
+       "      <td>-60.000000</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>-10.0</td>\n",
+       "      <td>-56.842105</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>-10.0</td>\n",
+       "      <td>-53.684211</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>-10.0</td>\n",
+       "      <td>-50.526316</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>-10.0</td>\n",
+       "      <td>-47.368421</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
       "text/plain": [
-       "<matplotlib.figure.Figure at 0x7fd0f14fcc88>"
+       "   EX_glc__D_e    EX_o2_e direction  flux\n",
+       "0        -10.0 -60.000000   minimum   0.0\n",
+       "1        -10.0 -56.842105   minimum   0.0\n",
+       "2        -10.0 -53.684211   minimum   0.0\n",
+       "3        -10.0 -50.526316   minimum   0.0\n",
+       "4        -10.0 -47.368421   minimum   0.0"
       ]
      },
+     "execution_count": 3,
      "metadata": {},
-     "output_type": "display_data"
+     "output_type": "execute_result"
     }
    ],
    "source": [
-    "data = calculate_phenotype_phase_plane(\n",
-    "    model, \"EX_glc__D_e\", \"EX_o2_e\")\n",
-    "data.plot_matplotlib();"
+    "prod_env.head()"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
+   "metadata": {
+    "deletable": true,
+    "editable": true
+   },
    "source": [
-    "If [palettable](https://github.com/jiffyclub/palettable) is installed, other color schemes can be used as well"
+    "If we specify the carbon source, we can also get the carbon and mass yield. For example, temporarily setting the objective to produce acetate instead, we could get the production envelope as follows and use pandas to quickly plot the results."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
+   },
+   "outputs": [],
+   "source": [
+    "prod_env = production_envelope(\n",
+    "    model, [\"EX_o2_e\"], objective=\"EX_ac_e\", c_source=\"EX_glc__D_e\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Gb250IDMgMCBSCi9Qcm9jU2V0IFsg\nL1BERiAvVGV4dCAvSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdIC9YT2JqZWN0IDcgMCBSCi9FeHRH\nU3RhdGUgNCAwIFIgL1BhdHRlcm4gNSAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgNDI0LjggMjgwLjggXQovQW5u\nb3RzIFsgXSAvR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAv [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXdUVFn2tp/KVWQQBEEQUVFQzDlnbVOrbc45tx2m43T/\nvok903HsYM5tzpgzYs4RRcyioqhIpnL6/sBbUzKYEATt+6zVy9VSckPde/Z5z3n33hK73Y6IiIiI\niEhJQ1rcJyAiIiIiIpIfYoASERERESmRiAFKRERERKREIgYoEREREZESiRigRERERERKJPLn/Fy0\n+ImIiIiIFDWS/P5SVFAiIiIiIiUSMUCJiIiIiJRIxAAlIiIiIlIiEQOUiIiIiEiJRAxQIiIiIiIl\nEjFAiYiIiIiUSMQAJSIiIiJSIhEDlIiIiIhIiUQMUCIiIiIiJRIxQImIiIiIlEjEACUiIiIiUiIR\nA5SI [...]
-      "text/plain": [
-       "<matplotlib.figure.Figure at 0x7fd0d8ff8e80>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Gb250IDMgMCBSCi9Qcm9jU2V0IFsg\nL1BERiAvVGV4dCAvSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdIC9YT2JqZWN0IDcgMCBSCi9FeHRH\nU3RhdGUgNCAwIFIgL1BhdHRlcm4gNSAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgNDI0LjggMjgwLjggXQovQW5u\nb3RzIFsgXSAvR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAv [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXeYHNWVt9+q6pzDBGVplHNCQjlLCBAiGRtswBiwARvD\nOqzD2p93sddhWXsdFhDBxmGNMZhkEwQooJxzzlkaSTPTOYeq+v7oqXZrGKXRzGgk6n0enrFnWn2r\nqqvvqd+9v3OOoKoqOjo6Ojo6bQ3xch+Ajo6Ojo5OY+gBSkdHR0enTaIHKB0dHR2dNokeoHR0dHR0\n2iR6gNLR0dHRaZMYzvN33eKno6Ojo9PSCI39UldQOjo6OjptEj1A6ejo6Oi0SfQApaOjo6PTJtED\nlI6Ojo5Om0QPUDo6Ojo6bRI9QOno6OjotEn0AKWjo6Oj0ybRA5SOjo6OTptED1A6Ojo6Om0SPUDp\n6Ojo [...]
+      "text/html": [
+       "<div>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>EX_o2_e</th>\n",
+       "      <th>carbon_source</th>\n",
+       "      <th>carbon_yield</th>\n",
+       "      <th>direction</th>\n",
+       "      <th>flux</th>\n",
+       "      <th>mass_yield</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>-60.000000</td>\n",
+       "      <td>EX_glc__D_e</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>-56.842105</td>\n",
+       "      <td>EX_glc__D_e</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>-53.684211</td>\n",
+       "      <td>EX_glc__D_e</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>-50.526316</td>\n",
+       "      <td>EX_glc__D_e</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>-47.368421</td>\n",
+       "      <td>EX_glc__D_e</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>minimum</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
       "text/plain": [
-       "<matplotlib.figure.Figure at 0x7fd0c9b93748>"
+       "     EX_o2_e carbon_source  carbon_yield direction  flux  mass_yield\n",
+       "0 -60.000000   EX_glc__D_e           0.0   minimum   0.0         0.0\n",
+       "1 -56.842105   EX_glc__D_e           0.0   minimum   0.0         0.0\n",
+       "2 -53.684211   EX_glc__D_e           0.0   minimum   0.0         0.0\n",
+       "3 -50.526316   EX_glc__D_e           0.0   minimum   0.0         0.0\n",
+       "4 -47.368421   EX_glc__D_e           0.0   minimum   0.0         0.0"
       ]
      },
+     "execution_count": 5,
      "metadata": {},
-     "output_type": "display_data"
+     "output_type": "execute_result"
     }
    ],
    "source": [
-    "data.plot_matplotlib(\"Pastel1\")\n",
-    "data.plot_matplotlib(\"Dark2\");"
+    "prod_env.head()"
    ]
   },
   {
-   "cell_type": "markdown",
-   "metadata": {},
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
    "source": [
-    "The number of points which are plotted in each dimension can also be changed"
+    "%matplotlib inline"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 7,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9TaGFkaW5nIDYgMCBSIC9Gb250IDMgMCBSCi9Qcm9jU2V0IFsg\nL1BERiAvVGV4dCAvSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdIC9YT2JqZWN0IDcgMCBSCi9FeHRH\nU3RhdGUgNCAwIFIgL1BhdHRlcm4gNSAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9SZXNvdXJj\nZXMgOCAwIFIgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWyAwIDAgNDI0LjggMjgwLjggXQovQW5u\nb3RzIFsgXSAvR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAv [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXmYW+WZ5v072pdSSSqpNpfxio2NARsDxhgv4AXjNQQC\nJHRWmIR0dzqZmWuSr5drpjOke/rruXqm05NMJ186HdIJoZMQwuLdBsxijI3BBhu8YGO81yaV9l3n\nnO+PU0eWVNpql53zuy4uMFWvdCRXnUfP+97PfQuyLKOhoaGhoVFv6Mb7AjQ0NDQ0NEqhFSgNDQ0N\njbpEK1AaGhoaGnWJVqA0NDQ0NOoSrUBpaGhoaNQlhipf1yR+GhoaGhqjjVDqf2odlIaGhoZGXaIV\nKA0NDQ2NukQrUBoaGhoadYlWoDQ0NDQ06hKtQGloaGho1CVagdLQ0NDQqEu0AqWhoaGhUZdoBUpD\nQ0ND [...]
       "text/plain": [
-       "<matplotlib.figure.Figure at 0x7fd0c9a50cc0>"
+       "<matplotlib.axes._subplots.AxesSubplot at 0x10fc37630>"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAELCAYAAAAiIMZEAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4VGX+/vH3J53QAiG0hEiAAAGlhtBEigUQhRWkKQpI\nty8/XXF1BYW1reLqiigiBFSqiKIgqNhpKUgPJUQgoUMglJD+/P5I9BsxkAmZyZmZfF7XlcvMmWfO\n3I8mt5Mzz5wjxhiUUkq5Fw+rAyillLI/LXellHJDWu5KKeWGtNyVUsoNabkrpZQb0nJXSik3ZFO5\ni0gvEdkjIokiMqmI+98QkS0FX3tF5Kz9oyqllLKVFLfOXUQ8gb3ArUAKEAsMNcbsusL4R4DWxpgH\n7JxVKaWUjWx55R4FJBpjkowxWcAioN9Vxg8FFtojnFJKqWvjZcOYYCC50O0UoH1RA0XkOiAM+O4K\n948F [...]
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0x10fc24a90>"
       ]
      },
      "metadata": {},
@@ -132,49 +298,18 @@
     }
    ],
    "source": [
-    "data = calculate_phenotype_phase_plane(\n",
-    "    model, \"EX_glc__D_e\", \"EX_o2_e\",\n",
-    "    reaction1_npoints=20, reaction2_npoints=20)\n",
-    "data.plot_matplotlib();"
+    "prod_env[prod_env.direction == 'maximum'].plot(\n",
+    "    kind='line', x='EX_o2_e', y='carbon_yield')"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The code can also use multiple processes to speed up calculations"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "took 0.44 seconds with 1 process\n",
-      "took 0.25 seconds with 4 process\n"
-     ]
-    }
-   ],
    "source": [
-    "start_time = time()\n",
-    "calculate_phenotype_phase_plane(\n",
-    "    model, \"EX_glc__D_e\", \"EX_o2_e\",\n",
-    "    reaction1_npoints=100, reaction2_npoints=100,\n",
-    "    n_processes=1)\n",
-    "print(\"took %.2f seconds with 1 process\" % (time() - start_time))\n",
-    "\n",
-    "start_time = time()\n",
-    "calculate_phenotype_phase_plane(\n",
-    "    model, \"EX_glc__D_e\", \"EX_o2_e\",\n",
-    "    reaction1_npoints=100, reaction2_npoints=100,\n",
-    "    n_processes=4)\n",
-    "print(\"took %.2f seconds with 4 process\" % (time() - start_time))"
+    "Previous versions of cobrapy included more tailored plots for phase planes which have now been dropped in order to improve maintainability and enhance the focus of cobrapy. Plotting for cobra models is intended for another package."
    ]
   }
  ],
@@ -194,7 +329,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/plot_helper.py b/documentation_builder/plot_helper.py
index 0e56682..d938bfa 100644
--- a/documentation_builder/plot_helper.py
+++ b/documentation_builder/plot_helper.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from matplotlib.pyplot import figure, xlim, ylim, gca, arrow, text, scatter
 from mpl_toolkits.axes_grid.axislines import SubplotZero
 from numpy import linspace, arange, sqrt, pi, sin, cos, sign
@@ -45,11 +46,12 @@ def plot_qp2():
     ax.plot([0.5], [1.5], 'bo')
 
     yrange = linspace(1, 2, 11)
-    for r in (yrange ** 2 / 2. - yrange):
+    for r in [y ** 2 / 2. - y for y in yrange]:
         t = linspace(-sqrt(2 * r + 1) + 0.000001,
                      sqrt(2 * r + 1) - 0.000001, 1000)
-        ax.plot(abs(t), 1 + sqrt(2 * r + 1 - t ** 2) * sign(t), '-.',
-                color="gray")
+        ax.plot(abs(t), [1 + sqrt(abs(2 * r + 1 - x ** 2)) *
+                         sign(x) for x in t],
+                '-.', color="gray")
 
 
 def plot_loop():
diff --git a/documentation_builder/pymatbridge.ipynb b/documentation_builder/pymatbridge.ipynb
index 51a7731..b028610 100644
--- a/documentation_builder/pymatbridge.ipynb
+++ b/documentation_builder/pymatbridge.ipynb
@@ -228,7 +228,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,
diff --git a/documentation_builder/qp.ipynb b/documentation_builder/qp.ipynb
deleted file mode 100644
index 916b95f..0000000
--- a/documentation_builder/qp.ipynb
+++ /dev/null
@@ -1,287 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Quadratic Programming\n",
-    "\n",
-    "Suppose we want to minimize the Euclidean distance of the solution to the origin while subject to linear constraints. This will require a quadratic objective function. Consider this example problem:\n",
-    "\n",
-    "> **min** $\\frac{1}{2}\\left(x^2 + y^2 \\right)$\n",
-    "\n",
-    "> *subject to*\n",
-    "\n",
-    "> $x + y = 2$\n",
-    "\n",
-    "> $x \\ge 0$\n",
-    "\n",
-    "> $y \\ge 0$\n",
-    "\n",
-    "This problem can be visualized graphically:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDIgMCBSID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9YT2JqZWN0IDcgMCBSIC9Qcm9jU2V0IFsgL1BERiAvVGV4dCAv\nSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdCi9Gb250IDMgMCBSIC9TaGFkaW5nIDYgMCBSIC9QYXR0\nZXJuIDUgMCBSIC9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9QYXJlbnQg\nMiAwIFIgL1Jlc291cmNlcyA4IDAgUiAvVHlwZSAvUGFnZSAvQ29udGVudHMgOSAwIFIKL01lZGlh\nQm94IFsgMCAwIDM1My44ODgwNjgxODE4IDI5NC4yMTgxODE4MTgyIF0KL0dyb3VwIDw8IC9UeXBl [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWEAAAEmCAYAAACzoiEDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvWd0VFea7/2vUs45gEQSIkgIiZzBYGySARvbGNsYGxyw\n3R67e7qn58Ost+9Md6+5d9b03L6mG7dtEDbG2JgkwASBABElEQXKQhEJZamkqlLlqnP2+0EtGlEl\nUNgnVGn/1vLq5pT07K2S6jn7POH/KAghEBKFQqEAAs3AHg9gBYAYAKr3CCHfCLowg8FgOAEKEZzw\nBgA/AUF/v2IAoLxHiGmioAszGAyGEyC4EwYAhUIxGYA7gFwA0wBUEUI0gi/MYDAYMkcUJ/xwMYWC\nEEIUoi3IYDAYMkcp9QYYDAZjKMOcMIPBYEiI+wC+Z7DxC/HiHwwGgyEfHIZi2UmYwWAwJIQ5YQaD\nwZAQ [...]
-      "text/plain": [
-       "<matplotlib.figure.Figure at 0x7f97b409d8d0>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "%matplotlib inline\n",
-    "import plot_helper\n",
-    "\n",
-    "plot_helper.plot_qp1()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The objective can be rewritten as $\\frac{1}{2} v^T \\cdot \\mathbf Q \\cdot v$, where\n",
-    "$v = \\left(\\begin{matrix} x \\\\ y\\end{matrix} \\right)$ and\n",
-    "$\\mathbf Q = \\left(\\begin{matrix} 1 & 0\\\\ 0 & 1 \\end{matrix}\\right)$\n",
-    "\n",
-    "The matrix $\\mathbf Q$ can be passed into a cobra model as the quadratic objective."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import scipy\n",
-    "\n",
-    "from cobra import Reaction, Metabolite, Model, solvers"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The quadratic objective $\\mathbf Q$ should be formatted as a scipy sparse matrix."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<2x2 sparse matrix of type '<class 'numpy.float64'>'\n",
-       "\twith 2 stored elements in Dictionary Of Keys format>"
-      ]
-     },
-     "execution_count": 3,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "Q = scipy.sparse.eye(2).todok()\n",
-    "Q"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In this case, the quadratic objective is simply the identity matrix"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "matrix([[ 1.,  0.],\n",
-       "        [ 0.,  1.]])"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "Q.todense()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We need to use a solver that supports quadratic programming, such as gurobi or cplex. If a solver which supports quadratic programming is installed, this function will return its name."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "cplex\n"
-     ]
-    }
-   ],
-   "source": [
-    "print(solvers.get_solver_name(qp=True))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'x': 1.0, 'y': 1.0}"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "c = Metabolite(\"c\")\n",
-    "c._bound = 2\n",
-    "x = Reaction(\"x\")\n",
-    "y = Reaction(\"y\")\n",
-    "x.add_metabolites({c: 1})\n",
-    "y.add_metabolites({c: 1})\n",
-    "m = Model()\n",
-    "m.add_reactions([x, y])\n",
-    "sol = m.optimize(quadratic_component=Q, objective_sense=\"minimize\")\n",
-    "sol.x_dict"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Suppose we change the problem to have a mixed linear and quadratic objective.\n",
-    "\n",
-    "> **min** $\\frac{1}{2}\\left(x^2 + y^2 \\right) - y$\n",
-    "\n",
-    "> *subject to*\n",
-    "\n",
-    "> $x + y = 2$\n",
-    "\n",
-    "> $x \\ge 0$\n",
-    "\n",
-    "> $y \\ge 0$\n",
-    "\n",
-    "Graphically, this would be"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDIgMCBSID4+\nCmVuZG9iago4IDAgb2JqCjw8IC9YT2JqZWN0IDcgMCBSIC9Qcm9jU2V0IFsgL1BERiAvVGV4dCAv\nSW1hZ2VCIC9JbWFnZUMgL0ltYWdlSSBdCi9Gb250IDMgMCBSIC9TaGFkaW5nIDYgMCBSIC9QYXR0\nZXJuIDUgMCBSIC9FeHRHU3RhdGUgNCAwIFIgPj4KZW5kb2JqCjEwIDAgb2JqCjw8IC9QYXJlbnQg\nMiAwIFIgL1Jlc291cmNlcyA4IDAgUiAvVHlwZSAvUGFnZSAvQ29udGVudHMgOSAwIFIKL01lZGlh\nQm94IFsgMCAwIDM1My44ODgwNjgxODE4IDI5NC4yMTgxODE4MTgyIF0KL0dyb3VwIDw8IC9UeXBl [...]
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWEAAAEmCAYAAACzoiEDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XtU1Ne9N/73F2aAYQBBUFBQLiLxCoLmotEYjcZbjGk0\nF01N09rYpGfl/Pr09HQ9z1mr67R9nl+7znp+5zRpbGpSk6aJRjFNjOKNaLxfQ/ACAnK/CcN9mAvM\n7Tvz/f3BwYozKuzvbWb4vNbqapiZ794bhM98Z3/2/mxOEATIieM4DohxAJ9ogZUAkgF0bxEE4SNZ\nOyaEkADAKRCEXwKwBxjz34/0AwipFAT7NFk7JoSQACB7EAYAjuNmA9AAuAIgD0CdIAgm2TsmhBA/\np0gQvt0ZxwmCIHCKdUgIIX4uRO0BEELIaEZBmBBCVKRhuEbs/IVy8x+EEOI/fE7F0p0wIYSoiIIw\nIYSo [...]
-      "text/plain": [
-       "<matplotlib.figure.Figure at 0x7f9796f1e400>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "plot_helper.plot_qp2()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "QP solvers in cobrapy will combine linear and quadratic coefficients. The linear portion will be obtained from the same objective_coefficient attribute used with LP's."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'x': 0.5, 'y': 1.5}"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "y.objective_coefficient = -1\n",
-    "sol = m.optimize(quadratic_component=Q, objective_sense=\"minimize\")\n",
-    "sol.x_dict"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.4.3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/documentation_builder/sampling.ipynb b/documentation_builder/sampling.ipynb
new file mode 100644
index 0000000..fd1fc35
--- /dev/null
+++ b/documentation_builder/sampling.ipynb
@@ -0,0 +1,576 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Flux sampling"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Basic usage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The easiest way to get started with flux sampling is using the `sample` function in the `flux_analysis` submodule. `sample` takes at least two arguments: a cobra model and the number of samples you want to generate."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>ACALD</th>\n",
+       "      <th>ACALDt</th>\n",
+       "      <th>ACKr</th>\n",
+       "      <th>ACONTa</th>\n",
+       "      <th>ACONTb</th>\n",
+       "      <th>ACt2r</th>\n",
+       "      <th>ADK1</th>\n",
+       "      <th>AKGDH</th>\n",
+       "      <th>AKGt2r</th>\n",
+       "      <th>ALCD2x</th>\n",
+       "      <th>...</th>\n",
+       "      <th>RPI</th>\n",
+       "      <th>SUCCt2_2</th>\n",
+       "      <th>SUCCt3</th>\n",
+       "      <th>SUCDi</th>\n",
+       "      <th>SUCOAS</th>\n",
+       "      <th>TALA</th>\n",
+       "      <th>THD2</th>\n",
+       "      <th>TKT1</th>\n",
+       "      <th>TKT2</th>\n",
+       "      <th>TPI</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>-3.706944</td>\n",
+       "      <td>-0.163964</td>\n",
+       "      <td>-0.295823</td>\n",
+       "      <td>8.975852</td>\n",
+       "      <td>8.975852</td>\n",
+       "      <td>-0.295823</td>\n",
+       "      <td>4.847986</td>\n",
+       "      <td>6.406533</td>\n",
+       "      <td>-0.081797</td>\n",
+       "      <td>-3.542980</td>\n",
+       "      <td>...</td>\n",
+       "      <td>-1.649393</td>\n",
+       "      <td>20.917568</td>\n",
+       "      <td>20.977290</td>\n",
+       "      <td>744.206008</td>\n",
+       "      <td>-6.406533</td>\n",
+       "      <td>1.639515</td>\n",
+       "      <td>1.670533</td>\n",
+       "      <td>1.639515</td>\n",
+       "      <td>1.635542</td>\n",
+       "      <td>6.256787</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>-1.340710</td>\n",
+       "      <td>-0.175665</td>\n",
+       "      <td>-0.429169</td>\n",
+       "      <td>11.047827</td>\n",
+       "      <td>11.047827</td>\n",
+       "      <td>-0.429169</td>\n",
+       "      <td>2.901598</td>\n",
+       "      <td>7.992916</td>\n",
+       "      <td>-0.230564</td>\n",
+       "      <td>-1.165045</td>\n",
+       "      <td>...</td>\n",
+       "      <td>-0.066975</td>\n",
+       "      <td>24.735567</td>\n",
+       "      <td>24.850041</td>\n",
+       "      <td>710.481004</td>\n",
+       "      <td>-7.992916</td>\n",
+       "      <td>0.056442</td>\n",
+       "      <td>9.680476</td>\n",
+       "      <td>0.056442</td>\n",
+       "      <td>0.052207</td>\n",
+       "      <td>7.184752</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>-1.964087</td>\n",
+       "      <td>-0.160334</td>\n",
+       "      <td>-0.618029</td>\n",
+       "      <td>9.811474</td>\n",
+       "      <td>9.811474</td>\n",
+       "      <td>-0.618029</td>\n",
+       "      <td>17.513791</td>\n",
+       "      <td>8.635576</td>\n",
+       "      <td>-0.284992</td>\n",
+       "      <td>-1.803753</td>\n",
+       "      <td>...</td>\n",
+       "      <td>-4.075515</td>\n",
+       "      <td>23.425719</td>\n",
+       "      <td>23.470968</td>\n",
+       "      <td>696.114154</td>\n",
+       "      <td>-8.635576</td>\n",
+       "      <td>4.063291</td>\n",
+       "      <td>52.316496</td>\n",
+       "      <td>4.063291</td>\n",
+       "      <td>4.058376</td>\n",
+       "      <td>5.122237</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>-0.838442</td>\n",
+       "      <td>-0.123865</td>\n",
+       "      <td>-0.376067</td>\n",
+       "      <td>11.869552</td>\n",
+       "      <td>11.869552</td>\n",
+       "      <td>-0.376067</td>\n",
+       "      <td>7.769872</td>\n",
+       "      <td>9.765178</td>\n",
+       "      <td>-0.325219</td>\n",
+       "      <td>-0.714577</td>\n",
+       "      <td>...</td>\n",
+       "      <td>-0.838094</td>\n",
+       "      <td>23.446704</td>\n",
+       "      <td>23.913036</td>\n",
+       "      <td>595.787313</td>\n",
+       "      <td>-9.765178</td>\n",
+       "      <td>0.822987</td>\n",
+       "      <td>36.019720</td>\n",
+       "      <td>0.822987</td>\n",
+       "      <td>0.816912</td>\n",
+       "      <td>8.364314</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>-0.232088</td>\n",
+       "      <td>-0.034346</td>\n",
+       "      <td>-1.067684</td>\n",
+       "      <td>7.972039</td>\n",
+       "      <td>7.972039</td>\n",
+       "      <td>-1.067684</td>\n",
+       "      <td>5.114975</td>\n",
+       "      <td>5.438125</td>\n",
+       "      <td>-0.787864</td>\n",
+       "      <td>-0.197742</td>\n",
+       "      <td>...</td>\n",
+       "      <td>-3.109205</td>\n",
+       "      <td>8.902309</td>\n",
+       "      <td>9.888083</td>\n",
+       "      <td>584.552692</td>\n",
+       "      <td>-5.438125</td>\n",
+       "      <td>3.088152</td>\n",
+       "      <td>12.621811</td>\n",
+       "      <td>3.088152</td>\n",
+       "      <td>3.079686</td>\n",
+       "      <td>6.185089</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "<p>5 rows × 95 columns</p>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "      ACALD    ACALDt      ACKr     ACONTa     ACONTb     ACt2r       ADK1  \\\n",
+       "0 -3.706944 -0.163964 -0.295823   8.975852   8.975852 -0.295823   4.847986   \n",
+       "1 -1.340710 -0.175665 -0.429169  11.047827  11.047827 -0.429169   2.901598   \n",
+       "2 -1.964087 -0.160334 -0.618029   9.811474   9.811474 -0.618029  17.513791   \n",
+       "3 -0.838442 -0.123865 -0.376067  11.869552  11.869552 -0.376067   7.769872   \n",
+       "4 -0.232088 -0.034346 -1.067684   7.972039   7.972039 -1.067684   5.114975   \n",
+       "\n",
+       "      AKGDH    AKGt2r    ALCD2x    ...          RPI   SUCCt2_2     SUCCt3  \\\n",
+       "0  6.406533 -0.081797 -3.542980    ...    -1.649393  20.917568  20.977290   \n",
+       "1  7.992916 -0.230564 -1.165045    ...    -0.066975  24.735567  24.850041   \n",
+       "2  8.635576 -0.284992 -1.803753    ...    -4.075515  23.425719  23.470968   \n",
+       "3  9.765178 -0.325219 -0.714577    ...    -0.838094  23.446704  23.913036   \n",
+       "4  5.438125 -0.787864 -0.197742    ...    -3.109205   8.902309   9.888083   \n",
+       "\n",
+       "        SUCDi    SUCOAS      TALA       THD2      TKT1      TKT2       TPI  \n",
+       "0  744.206008 -6.406533  1.639515   1.670533  1.639515  1.635542  6.256787  \n",
+       "1  710.481004 -7.992916  0.056442   9.680476  0.056442  0.052207  7.184752  \n",
+       "2  696.114154 -8.635576  4.063291  52.316496  4.063291  4.058376  5.122237  \n",
+       "3  595.787313 -9.765178  0.822987  36.019720  0.822987  0.816912  8.364314  \n",
+       "4  584.552692 -5.438125  3.088152  12.621811  3.088152  3.079686  6.185089  \n",
+       "\n",
+       "[5 rows x 95 columns]"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from cobra.test import create_test_model\n",
+    "from cobra.flux_analysis import sample\n",
+    "\n",
+    "model = create_test_model(\"textbook\")\n",
+    "s = sample(model, 100)\n",
+    "s.head()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default sample uses the `optgp` method based on the [method presented here](http://dx.doi.org/10.1371/journal.pone.0086587) as it is suited for larger models and can run in parallel. By default the sampler uses a single process. This can be changed by using the `processes` argument."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "One process:\n",
+      "CPU times: user 5.31 s, sys: 433 ms, total: 5.74 s\n",
+      "Wall time: 5.27 s\n",
+      "Two processes:\n",
+      "CPU times: user 217 ms, sys: 488 ms, total: 705 ms\n",
+      "Wall time: 2.8 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"One process:\")\n",
+    "%time s = sample(model, 1000)\n",
+    "print(\"Two processes:\")\n",
+    "%time s = sample(model, 1000, processes=2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Alternatively you can also use Artificial Centering Hit-and-Run for sampling by setting the method to `achr`.  `achr` does not support parallel execution but has good convergence and is almost Markovian."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "s = sample(model, 100, method=\"achr\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In general setting up the sampler is expensive since initial search directions are generated by solving many linear programming problems. Thus, we recommend to generate as many samples as possible in one go. However, this might require finer control over the sampling procedure as described in the following section."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Advanced usage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Sampler objects"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The sampling process can be controlled on a lower level by using the sampler classes directly."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from cobra.flux_analysis.sampling import OptGPSampler, ACHRSampler"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Both sampler classes have standardized interfaces and take some additional argument. For instance the `thinning` factor. \"Thinning\" means only recording samples every n iterations. A higher thinning factors mean less correlated samples but also larger computation times. By default the samplers use a thinning factor of 100 which creates roughly uncorrelated samples. If you want less samples but better mixing feel free to increase this parameter. If you want to study convergence for [...]
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "achr = ACHRSampler(model, thinning=10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`OptGPSampler` has an additional `processes` argument specifying how many processes are used to create parallel sampling chains. This should be in the order of your CPU cores for maximum efficiency. As noted before class initialization can take up to a few minutes due to generation of initial search directions. Sampling on the other hand is quick."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "optgp = OptGPSampler(model, processes=4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Sampling and validation"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Both samplers have a sample function that generates samples from the initialized object and act like the `sample` function described above, only that this time it will only accept a single argument, the number of samples. For `OptGPSampler` the number of samples should be a multiple of the number of processes, otherwise it will be increased to the nearest multiple automatically."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "s1 = achr.sample(100)\n",
+    "\n",
+    "s2 = optgp.sample(100)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can call `sample` repeatedly and both samplers are optimized to generate large amounts of samples without falling into \"numerical traps\". All sampler objects have a `validate` function in order to check if a set of points is feasible and give detailed information about feasibility violations in the form of a short code denoting feasibility. Here the short code is a combination of any of the following letters:\n",
+    "\n",
+    "- \"v\" - valid point\n",
+    "- \"l\" - lower bound violation\n",
+    "- \"u\" - upper bound violation\n",
+    "- \"e\" - equality violation (meaning the point is not a steady state)\n",
+    "\n",
+    "For instance for a random flux distribution (should not be feasible):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array(['le'], \n",
+       "      dtype='<U3')"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "bad = np.random.uniform(-1000, 1000, size=len(model.reactions))\n",
+    "achr.validate(np.atleast_2d(bad))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And for our generated samples:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array(['v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v',\n",
+       "       'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v', 'v'], \n",
+       "      dtype='<U3')"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "achr.validate(s1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Batch sampling"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sampler objects are made for generating billions of samples, however using the `sample` function might quickly fill up your RAM when working with genome-scale models. Here, the `batch` method of the sampler objects might come in handy. `batch` takes two arguments, the number of samples in each batch and the number of batches. This will make sense with a small example. \n",
+    "\n",
+    "Let's assume we want to quantify what proportion of our samples will grow. For that we might want to generate 10 batches of 100 samples each and measure what percentage of the samples in each batch show a growth rate larger than 0.1. Finally, we want to calculate the mean and standard deviation of those individual percentages."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Usually 8.70% +- 2.72% grow...\n"
+     ]
+    }
+   ],
+   "source": [
+    "counts = [np.mean(s.Biomass_Ecoli_core > 0.1) for s in optgp.batch(100, 10)]\n",
+    "print(\"Usually {:.2f}% +- {:.2f}% grow...\".format(\n",
+    "    np.mean(counts) * 100.0, np.std(counts) * 100.0))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Adding constraints"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Flux sampling will respect additional constraints defined in the model. For instance we can add a constraint enforcing growth in a similar manner to the section before."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "co = model.problem.Constraint(model.reactions.Biomass_Ecoli_core.flux_expression, lb=0.1)\n",
+    "model.add_cons_vars([co])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "*Note that this is only for demonstration purposes. Usually you could set the lower bound of the reaction directly instead of creating a new constraint.*"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0    0.175547\n",
+      "1    0.111499\n",
+      "2    0.123073\n",
+      "3    0.151874\n",
+      "4    0.122541\n",
+      "5    0.121878\n",
+      "6    0.147333\n",
+      "7    0.106499\n",
+      "8    0.174448\n",
+      "9    0.143273\n",
+      "Name: Biomass_Ecoli_core, dtype: float64\n"
+     ]
+    }
+   ],
+   "source": [
+    "s = sample(model, 10)\n",
+    "print(s.Biomass_Ecoli_core)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As we can see our new constraint was respected."
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/documentation_builder/simulating.ipynb b/documentation_builder/simulating.ipynb
index 03a5659..3deea72 100644
--- a/documentation_builder/simulating.ipynb
+++ b/documentation_builder/simulating.ipynb
@@ -4,22 +4,24 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Simulating with FBA\n",
-    "\n",
-    "Simulations using flux balance analysis can be solved using Model.optimize(). This will maximize or minimize (maximizing is the default) flux through the objective reactions."
+    "# Simulating with FBA"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Simulations using flux balance analysis can be solved using `Model.optimize()`. This will maximize or minimize (maximizing is the default) flux through the objective reactions."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
-    "import pandas\n",
-    "pandas.options.display.max_rows = 100\n",
-    "\n",
     "import cobra.test\n",
     "model = cobra.test.create_test_model(\"textbook\")"
    ]
@@ -34,57 +36,49 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
-     "data": {
-      "text/plain": [
-       "<Solution 0.87 at 0x10ddd0080>"
-      ]
-     },
-     "execution_count": 2,
-     "metadata": {},
-     "output_type": "execute_result"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "<Solution 0.874 at 0x112eb3d30>\n"
+     ]
     }
    ],
    "source": [
-    "model.optimize()"
+    "solution = model.optimize()\n",
+    "print(solution)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The Model.optimize() function will return a Solution object, which will also be stored at model.solution. A solution object has several attributes:\n",
+    "The Model.optimize() function will return a Solution object. A solution object has several attributes:\n",
     "\n",
-    " - f: the objective value\n",
-    " - status: the status from the linear programming solver\n",
-    " - x_dict: a dictionary of {reaction_id: flux_value} (also called \"primal\")\n",
-    " - x: a list for x_dict\n",
-    " - y_dict: a dictionary of {metabolite_id: dual_value}.\n",
-    " - y: a list for y_dict"
+    " - `objective_value`: the objective value\n",
+    " - `status`: the status from the linear programming solver\n",
+    " - `fluxes`: a pandas series with flux indexed by reaction identifier. The flux for a reaction variable is the difference of the primal values for the forward and reverse reaction variables.\n",
+    " - `shadow_prices`: a pandas series with shadow price indexed by the metabolite identifier."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "For example, after the last call to model.optimize(), the status should be 'optimal' if the solver returned no errors, and f should be the objective value"
+    "For example, after the last call to `model.optimize()`, if the optimization succeeds its status will be optimal. In case the model is infeasible an error is raised."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "'optimal'"
+       "0.8739215069684307"
       ]
      },
      "execution_count": 3,
@@ -93,20 +87,33 @@
     }
    ],
    "source": [
-    "model.solution.status"
+    "solution.objective_value"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The solvers that can be used with cobrapy are so fast that for many small to mid-size models computing the solution can be even faster than it takes to collect the values from the solver and convert to them python objects. With `model.optimize`, we gather values for all reactions and metabolites and that can take a significant amount of time if done repeatedly. If we are only interested in the flux value of a single reaction or the objective, it is faster to instead use `model.slim_ [...]
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CPU times: user 3.84 ms, sys: 672 µs, total: 4.51 ms\n",
+      "Wall time: 6.16 ms\n"
+     ]
+    },
+    {
      "data": {
       "text/plain": [
-       "0.8739215069684305"
+       "0.8739215069684307"
       ]
      },
      "execution_count": 4,
@@ -115,33 +122,68 @@
     }
    ],
    "source": [
-    "model.solution.f"
+    "%%time\n",
+    "model.optimize().objective_value"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CPU times: user 229 µs, sys: 19 µs, total: 248 µs\n",
+      "Wall time: 257 µs\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "0.8739215069684307"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "%%time\n",
+    "model.slim_optimize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Analyzing FBA solutions"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Analyzing FBA solutions\n",
     "Models solved using FBA can be further analyzed by using summary methods, which output printed text to give a quick representation of model behavior. Calling the summary method on the entire model displays information on the input and output behavior of the model, along with the optimized objective."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 6,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "IN FLUXES                     OUT FLUXES                    OBJECTIVES          \n",
-      "o2_e       -21.80             h2o_e    29.18                Biomass_Ecoli_core    0.874\n",
-      "glc__D_e   -10.00             co2_e    22.81                                    \n",
-      "nh4_e       -4.77             h_e      17.53                                    \n",
-      "pi_e        -3.21                                                               \n"
+      "IN FLUXES        OUT FLUXES    OBJECTIVES\n",
+      "---------------  ------------  ----------------------\n",
+      "o2_e      21.8   h2o_e  29.2   Biomass_Ecol...  0.874\n",
+      "glc__D_e  10     co2_e  22.8\n",
+      "nh4_e      4.77  h_e    17.5\n",
+      "pi_e       3.21\n"
      ]
     }
    ],
@@ -158,28 +200,28 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 7,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "PRODUCING REACTIONS -- Nicotinamide adenine dinucleotide - reduced\n",
-      "------------------------------------------------------------------\n",
-      "  %      FLUX   RXN ID                        REACTION                       \n",
-      " 41.6%     16     GAPD        g3p_c + nad_c + pi_c <=> 13dpg_c + h_c + nadh_c\n",
-      " 24.1%    9.3      PDH     coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c\n",
-      " 13.1%    5.1    AKGDH    akg_c + coa_c + nad_c --> co2_c + nadh_c + succoa_c\n",
-      " 13.1%    5.1      MDH              mal__L_c + nad_c <=> h_c + nadh_c + oaa_c\n",
-      "  8.0%    3.1 Bioma...   1.496 3pg_c + 3.7478 accoa_c + 59.81 atp_c + 0.36...\n",
+      "PRODUCING REACTIONS -- Nicotinamide adenine dinucleotide - reduced (nadh_c)\n",
+      "---------------------------------------------------------------------------\n",
+      "%       FLUX  RXN ID      REACTION\n",
+      "----  ------  ----------  --------------------------------------------------\n",
+      "42%    16     GAPD        g3p_c + nad_c + pi_c <=> 13dpg_c + h_c + nadh_c\n",
+      "24%     9.28  PDH         coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c\n",
+      "13%     5.06  AKGDH       akg_c + coa_c + nad_c --> co2_c + nadh_c + succ...\n",
+      "13%     5.06  MDH         mal__L_c + nad_c <=> h_c + nadh_c + oaa_c\n",
+      "8%      3.1   Biomass...  1.496 3pg_c + 3.7478 accoa_c + 59.81 atp_c + 0....\n",
       "\n",
-      "CONSUMING REACTIONS -- Nicotinamide adenine dinucleotide - reduced\n",
-      "------------------------------------------------------------------\n",
-      "  %      FLUX   RXN ID                        REACTION                       \n",
-      "100.0%    -39   NADH16   4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q8h2_c\n"
+      "CONSUMING REACTIONS -- Nicotinamide adenine dinucleotide - reduced (nadh_c)\n",
+      "---------------------------------------------------------------------------\n",
+      "%       FLUX  RXN ID      REACTION\n",
+      "----  ------  ----------  --------------------------------------------------\n",
+      "100%   38.5   NADH16      4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q...\n"
      ]
     }
    ],
@@ -196,29 +238,30 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 8,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "PRODUCING REACTIONS -- ATP\n",
-      "--------------------------\n",
-      "  %      FLUX   RXN ID                        REACTION                       \n",
-      " 66.6%     46   ATPS4r     adp_c + 4.0 h_e + pi_c <=> atp_c + h2o_c + 3.0 h_c\n",
-      " 23.4%     16      PGK                      3pg_c + atp_c <=> 13dpg_c + adp_c\n",
-      "  7.4%    5.1   SUCOAS     atp_c + coa_c + succ_c <=> adp_c + pi_c + succoa_c\n",
-      "  2.6%    1.8      PYK                  adp_c + h_c + pep_c --> atp_c + pyr_c\n",
+      "PRODUCING REACTIONS -- ATP (atp_c)\n",
+      "----------------------------------\n",
+      "%      FLUX  RXN ID      REACTION\n",
+      "---  ------  ----------  --------------------------------------------------\n",
+      "67%  45.5    ATPS4r      adp_c + 4.0 h_e + pi_c <=> atp_c + h2o_c + 3.0 h_c\n",
+      "23%  16      PGK         3pg_c + atp_c <=> 13dpg_c + adp_c\n",
+      "7%    5.06   SUCOAS      atp_c + coa_c + succ_c <=> adp_c + pi_c + succoa_c\n",
+      "3%    1.76   PYK         adp_c + h_c + pep_c --> atp_c + pyr_c\n",
       "\n",
-      "CONSUMING REACTIONS -- ATP\n",
-      "--------------------------\n",
-      "  %      FLUX   RXN ID                        REACTION                       \n",
-      " 76.5%    -52 Bioma...   1.496 3pg_c + 3.7478 accoa_c + 59.81 atp_c + 0.36...\n",
-      " 12.3%   -8.4     ATPM                   atp_c + h2o_c --> adp_c + h_c + pi_c\n",
-      " 10.9%   -7.5      PFK                  atp_c + f6p_c --> adp_c + fdp_c + h_c\n"
+      "CONSUMING REACTIONS -- ATP (atp_c)\n",
+      "----------------------------------\n",
+      "%      FLUX  RXN ID      REACTION\n",
+      "---  ------  ----------  --------------------------------------------------\n",
+      "76%  52.3    Biomass...  1.496 3pg_c + 3.7478 accoa_c + 59.81 atp_c + 0....\n",
+      "12%   8.39   ATPM        atp_c + h2o_c --> adp_c + h_c + pi_c\n",
+      "11%   7.48   PFK         atp_c + f6p_c --> adp_c + fdp_c + h_c\n",
+      "0%    0.223  GLNS        atp_c + glu__L_c + nh4_c --> adp_c + gln__L_c +...\n"
      ]
     }
    ],
@@ -230,16 +273,21 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Changing the Objectives\n",
-    "\n",
+    "## Changing the Objectives"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "The objective function is determined from the objective_coefficient attribute of the objective reaction(s). Generally, a \"biomass\" function which describes the composition of metabolites which make up a cell is used."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -250,52 +298,49 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Currently in the model, there is only one objective reaction (the biomass reaction), with an objective coefficient of 1."
+    "Currently in the model, there is only one reaction in the objective (the biomass reaction), with a linear coefficient of 1."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 10,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{<Reaction Biomass_Ecoli_core at 0x116510828>: 1.0}"
+       "{<Reaction Biomass_Ecoli_core at 0x112eab4a8>: 1.0}"
       ]
      },
-     "execution_count": 9,
+     "execution_count": 10,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "model.objective"
+    "from cobra.util.solver import linear_reaction_coefficients\n",
+    "linear_reaction_coefficients(model)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The objective function can be changed by assigning Model.objective, which can be a reaction object (or just it's name), or a dict of {Reaction: objective_coefficient}."
+    "The objective function can be changed by assigning Model.objective, which can be a reaction object (or just its name), or a `dict` of `{Reaction: objective_coefficient}`."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 11,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{<Reaction ATPM at 0x1165107b8>: 1}"
+       "{<Reaction ATPM at 0x112eab470>: 1.0}"
       ]
      },
-     "execution_count": 10,
+     "execution_count": 11,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -307,69 +352,47 @@
     "# The upper bound should be 1000, so that we get\n",
     "# the actual optimal value\n",
     "model.reactions.get_by_id(\"ATPM\").upper_bound = 1000.\n",
-    "model.objective"
+    "linear_reaction_coefficients(model)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 12,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "174.99999999999997"
+       "174.99999999999966"
       ]
      },
-     "execution_count": 11,
+     "execution_count": 12,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "model.optimize().f"
+    "model.optimize().objective_value"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The objective function can also be changed by setting Reaction.objective_coefficient directly."
+    "We can also have more complicated objectives including quadratic terms."
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{<Reaction Biomass_Ecoli_core at 0x116510828>: 1.0}"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
-    "model.reactions.get_by_id(\"ATPM\").objective_coefficient = 0.\n",
-    "biomass_rxn.objective_coefficient = 1.\n",
-    "\n",
-    "model.objective"
+    "## Running FVA"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Running FVA\n",
-    "\n",
     "FBA will not always give a unique solution, because multiple flux states can achieve the same optimum. FVA (or flux variability analysis) finds the ranges of each metabolic flux at the optimum."
    ]
   },
@@ -377,13 +400,35 @@
    "cell_type": "code",
    "execution_count": 13,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
+   "outputs": [],
+   "source": [
+    "from cobra.flux_analysis import flux_variability_analysis"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/html": [
        "<div>\n",
+       "<style>\n",
+       "    .dataframe thead tr:only-child th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: left;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "</style>\n",
        "<table border=\"1\" class=\"dataframe\">\n",
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
@@ -395,161 +440,110 @@
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>ACALD</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>-2.208811e-30</td>\n",
+       "      <td>-5.247085e-14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACALDt</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-5.247085e-14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACKr</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-8.024953e-14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTa</th>\n",
-       "      <td>6.00725</td>\n",
-       "      <td>6.00725</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>2.000000e+01</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTb</th>\n",
-       "      <td>6.00725</td>\n",
-       "      <td>6.00725</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>2.000000e+01</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-8.024953e-14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ADK1</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>3.410605e-13</td>\n",
+       "      <td>0.000000e+00</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGDH</th>\n",
-       "      <td>5.06438</td>\n",
-       "      <td>5.06438</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>2.000000e+01</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-2.902643e-14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ALCD2x</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ATPM</th>\n",
-       "      <td>8.39000</td>\n",
-       "      <td>8.39000</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ATPS4r</th>\n",
-       "      <td>45.51401</td>\n",
-       "      <td>45.51401</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>Biomass_Ecoli_core</th>\n",
-       "      <td>0.87392</td>\n",
-       "      <td>0.87392</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>CO2t</th>\n",
-       "      <td>-22.80983</td>\n",
-       "      <td>-22.80983</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>CS</th>\n",
-       "      <td>6.00725</td>\n",
-       "      <td>6.00725</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>CYTBD</th>\n",
-       "      <td>43.59899</td>\n",
-       "      <td>43.59899</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>D_LACt2</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ENO</th>\n",
-       "      <td>14.71614</td>\n",
-       "      <td>14.71614</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ETOHt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>0.00000</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>EX_ac_e</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-4.547474e-14</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
        "</table>\n",
        "</div>"
       ],
       "text/plain": [
-       "                     maximum   minimum\n",
-       "ACALD                0.00000   0.00000\n",
-       "ACALDt              -0.00000   0.00000\n",
-       "ACKr                -0.00000   0.00000\n",
-       "ACONTa               6.00725   6.00725\n",
-       "ACONTb               6.00725   6.00725\n",
-       "ACt2r                0.00000   0.00000\n",
-       "ADK1                -0.00000   0.00000\n",
-       "AKGDH                5.06438   5.06438\n",
-       "AKGt2r               0.00000   0.00000\n",
-       "ALCD2x               0.00000   0.00000\n",
-       "ATPM                 8.39000   8.39000\n",
-       "ATPS4r              45.51401  45.51401\n",
-       "Biomass_Ecoli_core   0.87392   0.87392\n",
-       "CO2t               -22.80983 -22.80983\n",
-       "CS                   6.00725   6.00725\n",
-       "CYTBD               43.59899  43.59899\n",
-       "D_LACt2              0.00000   0.00000\n",
-       "ENO                 14.71614  14.71614\n",
-       "ETOHt2r              0.00000   0.00000\n",
-       "EX_ac_e             -0.00000   0.00000"
+       "             maximum       minimum\n",
+       "ACALD  -2.208811e-30 -5.247085e-14\n",
+       "ACALDt  0.000000e+00 -5.247085e-14\n",
+       "ACKr    0.000000e+00 -8.024953e-14\n",
+       "ACONTa  2.000000e+01  2.000000e+01\n",
+       "ACONTb  2.000000e+01  2.000000e+01\n",
+       "ACt2r   0.000000e+00 -8.024953e-14\n",
+       "ADK1    3.410605e-13  0.000000e+00\n",
+       "AKGDH   2.000000e+01  2.000000e+01\n",
+       "AKGt2r  0.000000e+00 -2.902643e-14\n",
+       "ALCD2x  0.000000e+00 -4.547474e-14"
       ]
      },
-     "execution_count": 13,
+     "execution_count": 14,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "fva_result = cobra.flux_analysis.flux_variability_analysis(\n",
-    "    model, model.reactions[:20])\n",
-    "pandas.DataFrame.from_dict(fva_result).T.round(5)"
+    "flux_variability_analysis(model, model.reactions[:10])"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Setting parameter `fraction_of_optimum=0.90` would give the flux ranges for reactions at 90% optimality."
+    "Setting parameter `fraction_of_optimium=0.90` would give the flux ranges for reactions at 90% optimality."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 15,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/html": [
        "<div>\n",
+       "<style>\n",
+       "    .dataframe thead tr:only-child th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: left;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "</style>\n",
        "<table border=\"1\" class=\"dataframe\">\n",
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
@@ -561,175 +555,249 @@
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>ACALD</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-2.54237</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-2.692308</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACALDt</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>-2.54237</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-2.692308</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACKr</th>\n",
-       "      <td>-0.00000</td>\n",
-       "      <td>-3.81356</td>\n",
+       "      <td>6.635712e-30</td>\n",
+       "      <td>-4.117647</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTa</th>\n",
-       "      <td>8.89452</td>\n",
-       "      <td>0.84859</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>8.461538</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACONTb</th>\n",
-       "      <td>8.89452</td>\n",
-       "      <td>0.84859</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>8.461538</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ACt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-3.81356</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-4.117647</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ADK1</th>\n",
-       "      <td>17.16100</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>1.750000e+01</td>\n",
+       "      <td>0.000000</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGDH</th>\n",
-       "      <td>8.04593</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <td>2.000000e+01</td>\n",
+       "      <td>2.500000</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>AKGt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-1.43008</td>\n",
+       "      <td>2.651196e-16</td>\n",
+       "      <td>-1.489362</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>ALCD2x</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-2.21432</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ATPM</th>\n",
-       "      <td>25.55100</td>\n",
-       "      <td>8.39000</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ATPS4r</th>\n",
-       "      <td>59.38106</td>\n",
-       "      <td>34.82562</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>Biomass_Ecoli_core</th>\n",
-       "      <td>0.87392</td>\n",
-       "      <td>0.78653</td>\n",
+       "      <td>0.000000e+00</td>\n",
+       "      <td>-2.333333</td>\n",
        "    </tr>\n",
-       "    <tr>\n",
-       "      <th>CO2t</th>\n",
-       "      <td>-15.20653</td>\n",
-       "      <td>-26.52885</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>CS</th>\n",
-       "      <td>8.89452</td>\n",
-       "      <td>0.84859</td>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "             maximum   minimum\n",
+       "ACALD   0.000000e+00 -2.692308\n",
+       "ACALDt  0.000000e+00 -2.692308\n",
+       "ACKr    6.635712e-30 -4.117647\n",
+       "ACONTa  2.000000e+01  8.461538\n",
+       "ACONTb  2.000000e+01  8.461538\n",
+       "ACt2r   0.000000e+00 -4.117647\n",
+       "ADK1    1.750000e+01  0.000000\n",
+       "AKGDH   2.000000e+01  2.500000\n",
+       "AKGt2r  2.651196e-16 -1.489362\n",
+       "ALCD2x  0.000000e+00 -2.333333"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "cobra.flux_analysis.flux_variability_analysis(\n",
+    "    model, model.reactions[:10], fraction_of_optimum=0.9)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The standard FVA may contain loops, i.e. high absolute flux values that can only be high if they are allowed to participate in loops (a mathematical artifact that cannot happen in vivo). Use the `loopless` argument to avoid such loops. Below, we can see that the FRD7 and SUCDi reactions can participate in loops but that this is avoided when using the loopless FVA."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<style>\n",
+       "    .dataframe thead tr:only-child th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: left;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "</style>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>maximum</th>\n",
+       "      <th>minimum</th>\n",
        "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
        "    <tr>\n",
-       "      <th>CYTBD</th>\n",
-       "      <td>51.23909</td>\n",
-       "      <td>35.98486</td>\n",
+       "      <th>FRD7</th>\n",
+       "      <td>980.0</td>\n",
+       "      <td>0.0</td>\n",
        "    </tr>\n",
        "    <tr>\n",
-       "      <th>D_LACt2</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-2.14512</td>\n",
+       "      <th>SUCDi</th>\n",
+       "      <td>1000.0</td>\n",
+       "      <td>20.0</td>\n",
        "    </tr>\n",
-       "    <tr>\n",
-       "      <th>ENO</th>\n",
-       "      <td>16.73252</td>\n",
-       "      <td>8.68659</td>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "       maximum  minimum\n",
+       "FRD7     980.0      0.0\n",
+       "SUCDi   1000.0     20.0"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "loop_reactions = [model.reactions.FRD7, model.reactions.SUCDi]\n",
+    "flux_variability_analysis(model, reaction_list=loop_reactions, loopless=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<style>\n",
+       "    .dataframe thead tr:only-child th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: left;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "</style>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>maximum</th>\n",
+       "      <th>minimum</th>\n",
        "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
        "    <tr>\n",
-       "      <th>ETOHt2r</th>\n",
-       "      <td>0.00000</td>\n",
-       "      <td>-2.21432</td>\n",
+       "      <th>FRD7</th>\n",
+       "      <td>0.0</td>\n",
+       "      <td>0.0</td>\n",
        "    </tr>\n",
        "    <tr>\n",
-       "      <th>EX_ac_e</th>\n",
-       "      <td>3.81356</td>\n",
-       "      <td>0.00000</td>\n",
+       "      <th>SUCDi</th>\n",
+       "      <td>20.0</td>\n",
+       "      <td>20.0</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
        "</table>\n",
        "</div>"
       ],
       "text/plain": [
-       "                     maximum   minimum\n",
-       "ACALD                0.00000  -2.54237\n",
-       "ACALDt              -0.00000  -2.54237\n",
-       "ACKr                -0.00000  -3.81356\n",
-       "ACONTa               8.89452   0.84859\n",
-       "ACONTb               8.89452   0.84859\n",
-       "ACt2r                0.00000  -3.81356\n",
-       "ADK1                17.16100   0.00000\n",
-       "AKGDH                8.04593   0.00000\n",
-       "AKGt2r               0.00000  -1.43008\n",
-       "ALCD2x               0.00000  -2.21432\n",
-       "ATPM                25.55100   8.39000\n",
-       "ATPS4r              59.38106  34.82562\n",
-       "Biomass_Ecoli_core   0.87392   0.78653\n",
-       "CO2t               -15.20653 -26.52885\n",
-       "CS                   8.89452   0.84859\n",
-       "CYTBD               51.23909  35.98486\n",
-       "D_LACt2              0.00000  -2.14512\n",
-       "ENO                 16.73252   8.68659\n",
-       "ETOHt2r              0.00000  -2.21432\n",
-       "EX_ac_e              3.81356   0.00000"
+       "       maximum  minimum\n",
+       "FRD7       0.0      0.0\n",
+       "SUCDi     20.0     20.0"
       ]
      },
-     "execution_count": 14,
+     "execution_count": 17,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "fva_result = cobra.flux_analysis.flux_variability_analysis(\n",
-    "    model, model.reactions[:20], fraction_of_optimum=0.9)\n",
-    "pandas.DataFrame.from_dict(fva_result).T.round(5)"
+    "flux_variability_analysis(model, reaction_list=loop_reactions, loopless=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Running FVA in summary methods"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Running FVA in summary methods\n",
     "Flux variability analysis can also be embedded in calls to summary methods. For instance, the expected variability in substrate consumption and product formation can be quickly found by"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 18,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "IN FLUXES                     OUT FLUXES                    OBJECTIVES          \n",
-      "o2_e        -21.80 ± 1.91     h2o_e       27.86 ± 2.86      Biomass_Ecoli_core    0.874\n",
-      "glc__D_e     -9.76 ± 0.24     co2_e       21.81 ± 2.86                          \n",
-      "nh4_e        -4.84 ± 0.32     h_e         19.51 ± 2.86                          \n",
-      "pi_e         -3.13 ± 0.08     for_e        2.86 ± 2.86                          \n",
-      "                              ac_e         0.95 ± 0.95                          \n",
-      "                              acald_e      0.64 ± 0.64                          \n",
-      "                              pyr_e        0.64 ± 0.64                          \n",
-      "                              etoh_e       0.55 ± 0.55                          \n",
-      "                              lac__D_e     0.54 ± 0.54                          \n",
-      "                              succ_e       0.42 ± 0.42                          \n",
-      "                              akg_e        0.36 ± 0.36                          \n",
-      "                              glu__L_e     0.32 ± 0.32                          \n"
+      "IN FLUXES                     OUT FLUXES                    OBJECTIVES\n",
+      "----------------------------  ----------------------------  ------------\n",
+      "id          Flux  Range       id          Flux  Range       ATPM  175\n",
+      "--------  ------  ----------  --------  ------  ----------\n",
+      "o2_e          60  [55.9, 60]  co2_e         60  [54.2, 60]\n",
+      "glc__D_e      10  [9.5, 10]   h2o_e         60  [54.2, 60]\n",
+      "nh4_e          0  [0, 0.673]  for_e          0  [0, 5.83]\n",
+      "pi_e           0  [0, 0.171]  h_e            0  [0, 5.83]\n",
+      "                              ac_e           0  [0, 2.06]\n",
+      "                              acald_e        0  [0, 1.35]\n",
+      "                              pyr_e          0  [0, 1.35]\n",
+      "                              etoh_e         0  [0, 1.17]\n",
+      "                              lac__D_e       0  [0, 1.13]\n",
+      "                              succ_e         0  [0, 0.875]\n",
+      "                              akg_e          0  [0, 0.745]\n",
+      "                              glu__L_e       0  [0, 0.673]\n"
      ]
     }
    ],
@@ -742,31 +810,37 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Similarly, variability in metabolite mass balances can also be checked with flux variability analysis"
+    "Similarly, variability in metabolite mass balances can also be checked with flux variability analysis."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 19,
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "PRODUCING REACTIONS -- Pyruvate\n",
-      "-------------------------------\n",
-      "  %             FLUX   RXN ID                        REACTION                       \n",
-      " 85.0%   9.76 ± 0.24   GLCpts                     glc__D_e + pep_c --> g6p_c + pyr_c\n",
-      " 15.0%   6.13 ± 6.13      PYK                  adp_c + h_c + pep_c --> atp_c + pyr_c\n",
+      "PRODUCING REACTIONS -- Pyruvate (pyr_c)\n",
+      "---------------------------------------\n",
+      "%       FLUX  RANGE         RXN ID      REACTION\n",
+      "----  ------  ------------  ----------  ----------------------------------------\n",
+      "50%       10  [1.25, 18.8]  PYK         adp_c + h_c + pep_c --> atp_c + pyr_c\n",
+      "50%       10  [9.5, 10]     GLCpts      glc__D_e + pep_c --> g6p_c + pyr_c\n",
+      "0%         0  [0, 8.75]     ME1         mal__L_c + nad_c --> co2_c + nadh_c +...\n",
+      "0%         0  [0, 8.75]     ME2         mal__L_c + nadp_c --> co2_c + nadph_c...\n",
       "\n",
-      "CONSUMING REACTIONS -- Pyruvate\n",
-      "-------------------------------\n",
-      "  %             FLUX   RXN ID                        REACTION                       \n",
-      " 78.9%  11.34 ± 7.43      PDH     coa_c + nad_c + pyr_c --> accoa_c + co2_c + nadh_c\n",
-      " 21.1%   0.85 ± 0.02 Bioma...   1.496 3pg_c + 3.7478 accoa_c + 59.81 atp_c + 0.36...\n"
+      "CONSUMING REACTIONS -- Pyruvate (pyr_c)\n",
+      "---------------------------------------\n",
+      "%       FLUX  RANGE         RXN ID      REACTION\n",
+      "----  ------  ------------  ----------  ----------------------------------------\n",
+      "100%      20  [13, 28.8]    PDH         coa_c + nad_c + pyr_c --> accoa_c + c...\n",
+      "0%         0  [0, 8.75]     PPS         atp_c + h2o_c + pyr_c --> amp_c + 2.0...\n",
+      "0%         0  [0, 5.83]     PFL         coa_c + pyr_c --> accoa_c + for_c\n",
+      "0%         0  [0, 1.35]     PYRt2       h_e + pyr_e <=> h_c + pyr_c\n",
+      "0%         0  [0, 1.13]     LDH_D       lac__D_c + nad_c <=> h_c + nadh_c + p...\n",
+      "0%         0  [0, 0.132]    Biomass...  1.496 3pg_c + 3.7478 accoa_c + 59.81 ...\n"
      ]
     }
    ],
@@ -785,50 +859,55 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Running pFBA\n",
-    "\n",
+    "## Running pFBA"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "Parsimonious FBA (often written pFBA) finds a flux distribution which gives the optimal growth rate, but minimizes the total sum of flux. This involves solving two sequential linear programs, but is handled transparently by cobrapy. For more details on pFBA, please see [Lewis et al. (2010)](http://dx.doi.org/10.1038/msb.2010.47)."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 20,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
-    "FBA_sol = model.optimize()\n",
-    "pFBA_sol = cobra.flux_analysis.optimize_minimal_flux(model)"
+    "model.objective = 'Biomass_Ecoli_core'\n",
+    "fba_solution = model.optimize()\n",
+    "pfba_solution = cobra.flux_analysis.pfba(model)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "These functions should give approximately the same objective value"
+    "These functions should give approximately the same objective value."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 21,
+   "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "1.1102230246251565e-16"
+       "7.7715611723760958e-16"
       ]
      },
-     "execution_count": 18,
+     "execution_count": 21,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "abs(FBA_sol.f - pFBA_sol.f)"
+    "abs(fba_solution.fluxes[\"Biomass_Ecoli_core\"] - pfba_solution.fluxes[\n",
+    "    \"Biomass_Ecoli_core\"])"
    ]
   }
  ],
@@ -848,9 +927,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.4"
+   "version": "3.6.0"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
diff --git a/documentation_builder/solvers.ipynb b/documentation_builder/solvers.ipynb
index 06c76a8..c7e8049 100644
--- a/documentation_builder/solvers.ipynb
+++ b/documentation_builder/solvers.ipynb
@@ -4,702 +4,104 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Solver Interface\n",
-    "\n",
-    "Each cobrapy solver must expose the following API. The solvers all will have their own distinct LP object types, but each can be manipulated by these functions. This API can be used directly when implementing algorithms efficiently on linear programs because it has 2 primary benefits:\n",
-    "\n",
-    "1. Avoid the overhead of creating and destroying LP's for each operation\n",
-    "\n",
-    "2. Many solver objects preserve the basis between subsequent LP's, making each subsequent LP solve faster\n",
-    "\n",
-    "We will walk though the API with the cglpk solver, which links the cobrapy solver API with [GLPK](http://www.gnu.org/software/glpk/)'s C API."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "import cobra.test\n",
-    "\n",
-    "model = cobra.test.create_test_model(\"textbook\")\n",
-    "solver = cobra.solvers.cglpk"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Attributes and functions\n",
-    "\n",
-    "Each solver has some attributes:\n",
-    "\n",
-    "### solver_name\n",
-    "\n",
-    "The name of the solver. This is the name which will be used to select the solver in cobrapy functions."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'cglpk'"
-      ]
-     },
-     "execution_count": 2,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.solver_name"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<Solution 0.87 at 0x7fd42ad90c18>"
-      ]
-     },
-     "execution_count": 3,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "model.optimize(solver=\"cglpk\")"
+    "# Solvers"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### \\_SUPPORTS_MILP\n",
-    "\n",
-    "The presence of this attribute tells cobrapy that the solver supports mixed-integer linear programming"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "True"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
    "source": [
-    "solver._SUPPORTS_MILP"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### solve\n",
+    "A constraints-based reconstruction and analysis model for biological systems is actually just an application of a class of discrete optimization problems typically solved with [linear, mixed integer](https://en.wikipedia.org/wiki/Linear_programming) or [quadratic programming](https://en.wikipedia.org/wiki/Quadratic_programming) techniques. Cobrapy does not implement any algorithms to find solutions to such problems but rather creates an biologically motivated abstraction to these te [...]
     "\n",
-    "Model.optimize is a wrapper for each solver's solve function. It takes in a cobra model and returns a solution"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<Solution 0.87 at 0x7fd42ad90908>"
-      ]
-     },
-     "execution_count": 5,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.solve(model)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### create_problem\n",
+    "The actual solving is instead done by tools such as the free software [glpk](https://www.gnu.org/software/glpk/) or commercial tools [gurobi](http://www.gurobi.com/) and [cplex](https://www-01.ibm.com/software/commerce/optimization/cplex-optimizer/) which are all made available as a common programmers interface via the [optlang](https://github.com/biosustain/optlang) package.\n",
     "\n",
-    "This creates the LP object for the solver."
+    "When you have defined your model, you can switch solver backend by simply assigning to the `model.solver` property."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<cobra.solvers.cglpk.GLP at 0x3e846e8>"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "lp = solver.create_problem(model, objective_sense=\"maximize\")\n",
-    "lp"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### solve_problem\n",
-    "\n",
-    "Solve the LP object and return the solution status"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'optimal'"
-      ]
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.solve_problem(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### format_solution\n",
-    "\n",
-    "Extract a cobra.Solution object from a solved LP object"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<Solution 0.87 at 0x7fd42ad90668>"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.format_solution(lp, model)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### get_objective_value\n",
-    "\n",
-    "Extract the objective value from a solved LP object"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "0.8739215069684909"
-      ]
-     },
-     "execution_count": 9,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.get_objective_value(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### get_status\n",
-    "\n",
-    "Get the solution status of a solved LP object"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'optimal'"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.get_status(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### change_variable_objective\n",
-    "\n",
-    "change the objective coefficient a reaction at a particular index. This does not change any of the other objectives which have already been set. This example will double and then revert the biomass coefficient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "12"
-      ]
-     },
-     "execution_count": 11,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "model.reactions.index(\"Biomass_Ecoli_core\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "1.7478430139369818"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_variable_objective(lp, 12, 2)\n",
-    "solver.solve_problem(lp)\n",
-    "solver.get_objective_value(lp)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "0.8739215069684909"
-      ]
-     },
-     "execution_count": 13,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_variable_objective(lp, 12, 1)\n",
-    "solver.solve_problem(lp)\n",
-    "solver.get_objective_value(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### change variable_bounds\n",
-    "\n",
-    "change the lower and upper bounds of a reaction at a particular index. This example will set the lower bound of the biomass to an infeasible value, then revert it."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'infeasible'"
-      ]
-     },
-     "execution_count": 14,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_variable_bounds(lp, 12, 1000, 1000)\n",
-    "solver.solve_problem(lp)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'optimal'"
-      ]
-     },
-     "execution_count": 15,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_variable_bounds(lp, 12, 0, 1000)\n",
-    "solver.solve_problem(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### change_coefficient\n",
-    "\n",
-    "Change a coefficient in the stoichiometric matrix. In this example, we will set the entry for ADP in the ATMP reaction to in infeasible value, then reset it."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "16"
-      ]
-     },
-     "execution_count": 16,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "model.metabolites.index(\"atp_c\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "10"
-      ]
-     },
-     "execution_count": 17,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "model.reactions.index(\"ATPM\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'infeasible'"
-      ]
-     },
-     "execution_count": 18,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_coefficient(lp, 16, 10, -10)\n",
-    "solver.solve_problem(lp)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": 1,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "'optimal'"
-      ]
-     },
-     "execution_count": 19,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "solver.change_coefficient(lp, 16, 10, -1)\n",
-    "solver.solve_problem(lp)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
+   "outputs": [],
    "source": [
-    "### set_parameter\n",
-    "\n",
-    "Set a solver parameter. Each solver will have its own particular set of unique paramters. However, some have unified names. For example, all solvers should accept \"tolerance_feasibility.\""
+    "import cobra.test\n",
+    "model = cobra.test.create_test_model('textbook')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": 2,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [],
    "source": [
-    "solver.set_parameter(lp, \"tolerance_feasibility\", 1e-9)"
+    "model.solver = 'glpk'\n",
+    "# or if you have cplex installed\n",
+    "model.solver = 'cplex'"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 21,
+   "cell_type": "markdown",
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "0.0"
-      ]
-     },
-     "execution_count": 21,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
    "source": [
-    "solver.set_parameter(lp, \"objective_sense\", \"minimize\")\n",
-    "solver.solve_problem(lp)\n",
-    "solver.get_objective_value(lp)"
+    "For information on how to configure and tune the solver, please see the [documentation for optlang project](http://optlang.readthedocs.io) and note that `model.solver` is simply an object optlang of class `Model`."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": 3,
    "metadata": {
-    "collapsed": false
+    "collapsed": false,
+    "deletable": true,
+    "editable": true
    },
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "0.8739215069684912"
+       "optlang.cplex_interface.Model"
       ]
      },
-     "execution_count": 22,
+     "execution_count": 3,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "solver.set_parameter(lp, \"objective_sense\", \"maximize\")\n",
-    "solver.solve_problem(lp)\n",
-    "solver.get_objective_value(lp)"
+    "type(model.solver)"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Example with FVA\n",
-    "\n",
-    "Consider flux variability analysis (FVA), which requires maximizing and minimizing every reaction with the original biomass value fixed at its optimal value. If we used the cobra Model API in a naive implementation, we would do the following:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 23,
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CPU times: user 171 ms, sys: 0 ns, total: 171 ms\n",
-      "Wall time: 171 ms\n"
-     ]
-    }
-   ],
    "source": [
-    "%%time\n",
-    "# work on a copy of the model so the original is not changed\n",
-    "m = model.copy()\n",
-    "\n",
-    "# set the lower bound on the objective to be the optimal value\n",
-    "f = m.optimize().f\n",
-    "for objective_reaction, coefficient in m.objective.items():\n",
-    "    objective_reaction.lower_bound = coefficient * f\n",
-    "\n",
-    "# now maximize and minimze every reaction to find its bounds\n",
-    "fva_result = {}\n",
-    "for r in m.reactions:\n",
-    "    m.change_objective(r)\n",
-    "    fva_result[r.id] = {\n",
-    "        \"maximum\": m.optimize(objective_sense=\"maximize\").f,\n",
-    "        \"minimum\": m.optimize(objective_sense=\"minimize\").f\n",
-    "    }"
+    "## Internal solver interfaces"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Instead, we could use the solver API to do this more efficiently. This is roughly how cobrapy implementes FVA. It keeps uses the same LP object and repeatedly maximizes and minimizes it. This allows the solver to preserve the basis, and is much faster. The speed increase is even more noticeable the larger the model gets."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
    "metadata": {
-    "collapsed": false
+    "deletable": true,
+    "editable": true
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CPU times: user 8.28 ms, sys: 25 µs, total: 8.31 ms\n",
-      "Wall time: 8.14 ms\n"
-     ]
-    }
-   ],
    "source": [
-    "%%time\n",
-    "# create the LP object\n",
-    "lp = solver.create_problem(model)\n",
-    "\n",
-    "# set the lower bound on the objective to be the optimal value\n",
-    "solver.solve_problem(lp)\n",
-    "f = solver.get_objective_value(lp)\n",
-    "for objective_reaction, coefficient in model.objective.items():\n",
-    "    objective_index = model.reactions.index(objective_reaction)\n",
-    "    # old objective is no longer the objective\n",
-    "    solver.change_variable_objective(lp, objective_index, 0.)\n",
-    "    solver.change_variable_bounds(\n",
-    "        lp, objective_index, f * coefficient,\n",
-    "        objective_reaction.upper_bound)\n",
-    "\n",
-    "# now maximize and minimze every reaction to find its bounds\n",
-    "fva_result = {}\n",
-    "for index, r in enumerate(model.reactions):\n",
-    "    solver.change_variable_objective(lp, index, 1.)\n",
-    "    result = {}\n",
-    "    solver.solve_problem(lp, objective_sense=\"maximize\")\n",
-    "    result[\"maximum\"] = solver.get_objective_value(lp)\n",
-    "    solver.solve_problem(lp, objective_sense=\"minimize\")\n",
-    "    result[\"minimum\"] = solver.get_objective_value(lp)\n",
-    "    solver.change_variable_objective(lp, index, 0.)\n",
-    "    fva_result[r.id] = result"
+    "Cobrapy also contains its own solver interfaces but these are now deprecated and will be removed completely in the near future. For documentation of how to use these, please refer to [older documentation](http://cobrapy.readthedocs.io/en/0.5.11/)."
    ]
   }
  ],
@@ -719,9 +121,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
diff --git a/ez_setup.py b/ez_setup.py
deleted file mode 100644
index 4ef3ee0..0000000
--- a/ez_setup.py
+++ /dev/null
@@ -1,426 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Setuptools bootstrapping installer.
-
-Maintained at https://github.com/pypa/setuptools/tree/bootstrap.
-
-Run this script to install or upgrade setuptools.
-"""
-
-import os
-import shutil
-import sys
-import tempfile
-import zipfile
-import optparse
-import subprocess
-import platform
-import textwrap
-import contextlib
-import json
-import codecs
-
-from distutils import log
-
-try:
-    from urllib.request import urlopen
-    from urllib.parse import urljoin
-except ImportError:
-    from urllib2 import urlopen
-    from urlparse import urljoin
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-LATEST = object()
-DEFAULT_VERSION = LATEST
-DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/"
-DEFAULT_SAVE_DIR = os.curdir
-
-
-def _python_cmd(*args):
-    """
-    Execute a command.
-
-    Return True if the command succeeded.
-    """
-    args = (sys.executable,) + args
-    return subprocess.call(args) == 0
-
-
-def _install(archive_filename, install_args=()):
-    """Install Setuptools."""
-    with archive_context(archive_filename):
-        # installing
-        log.warn('Installing Setuptools')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-
-
-def _build_egg(egg, archive_filename, to_dir):
-    """Build Setuptools egg."""
-    with archive_context(archive_filename):
-        # building an egg
-        log.warn('Building a Setuptools egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-class ContextualZipFile(zipfile.ZipFile):
-
-    """Supplement ZipFile class to support context manager for Python 2.6."""
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, type, value, traceback):
-        self.close()
-
-    def __new__(cls, *args, **kwargs):
-        """Construct a ZipFile or ContextualZipFile as appropriate."""
-        if hasattr(zipfile.ZipFile, '__exit__'):
-            return zipfile.ZipFile(*args, **kwargs)
-        return super(ContextualZipFile, cls).__new__(cls)
-
-
- at contextlib.contextmanager
-def archive_context(filename):
-    """
-    Unzip filename to a temporary directory, set to the cwd.
-
-    The unzipped target is cleaned up after.
-    """
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        with ContextualZipFile(filename) as archive:
-            archive.extractall()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-        yield
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    """Download Setuptools."""
-    py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
-    tp = 'setuptools-{version}-{py_desig}.egg'
-    egg = os.path.join(to_dir, tp.format(**locals()))
-    if not os.path.exists(egg):
-        archive = download_setuptools(version, download_base,
-            to_dir, download_delay)
-        _build_egg(egg, archive, to_dir)
-    sys.path.insert(0, egg)
-
-    # Remove previously-imported pkg_resources if present (see
-    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
-    if 'pkg_resources' in sys.modules:
-        _unload_pkg_resources()
-
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(
-        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=DEFAULT_SAVE_DIR, download_delay=15):
-    """
-    Ensure that a setuptools version is installed.
-
-    Return None. Raise SystemExit if the requested version
-    or later cannot be installed.
-    """
-    version = _resolve_version(version)
-    to_dir = os.path.abspath(to_dir)
-
-    # prior to importing, capture the module state for
-    # representative modules.
-    rep_modules = 'pkg_resources', 'setuptools'
-    imported = set(sys.modules).intersection(rep_modules)
-
-    try:
-        import pkg_resources
-        pkg_resources.require("setuptools>=" + version)
-        # a suitable version is already installed
-        return
-    except ImportError:
-        # pkg_resources not available; setuptools is not installed; download
-        pass
-    except pkg_resources.DistributionNotFound:
-        # no version of setuptools was found; allow download
-        pass
-    except pkg_resources.VersionConflict as VC_err:
-        if imported:
-            _conflict_bail(VC_err, version)
-
-        # otherwise, unload pkg_resources to allow the downloaded version to
-        #  take precedence.
-        del pkg_resources
-        _unload_pkg_resources()
-
-    return _do_download(version, download_base, to_dir, download_delay)
-
-
-def _conflict_bail(VC_err, version):
-    """
-    Setuptools was imported prior to invocation, so it is
-    unsafe to unload it. Bail out.
-    """
-    conflict_tmpl = textwrap.dedent("""
-        The required version of setuptools (>={version}) is not available,
-        and can't be installed while this script is running. Please
-        install a more recent version first, using
-        'easy_install -U setuptools'.
-
-        (Currently using {VC_err.args[0]!r})
-        """)
-    msg = conflict_tmpl.format(**locals())
-    sys.stderr.write(msg)
-    sys.exit(2)
-
-
-def _unload_pkg_resources():
-    sys.meta_path = [
-        importer
-        for importer in sys.meta_path
-        if importer.__class__.__module__ != 'pkg_resources.extern'
-    ]
-    del_modules = [
-        name for name in sys.modules
-        if name.startswith('pkg_resources')
-    ]
-    for mod_name in del_modules:
-        del sys.modules[mod_name]
-
-
-def _clean_check(cmd, target):
-    """
-    Run the command to download target.
-
-    If the command fails, clean up before re-raising the error.
-    """
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        if os.access(target, os.F_OK):
-            os.unlink(target)
-        raise
-
-
-def download_file_powershell(url, target):
-    """
-    Download the file at url to target using Powershell.
-
-    Powershell will validate trust.
-    Raise an exception if the command cannot complete.
-    """
-    target = os.path.abspath(target)
-    ps_cmd = (
-        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
-        "[System.Net.CredentialCache]::DefaultCredentials; "
-        '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")'
-        % locals()
-    )
-    cmd = [
-        'powershell',
-        '-Command',
-        ps_cmd,
-    ]
-    _clean_check(cmd, target)
-
-
-def has_powershell():
-    """Determine if Powershell is available."""
-    if platform.system() != 'Windows':
-        return False
-    cmd = ['powershell', '-Command', 'echo test']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_powershell.viable = has_powershell
-
-
-def download_file_curl(url, target):
-    cmd = ['curl', url, '--location', '--silent', '--output', target]
-    _clean_check(cmd, target)
-
-
-def has_curl():
-    cmd = ['curl', '--version']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_curl.viable = has_curl
-
-
-def download_file_wget(url, target):
-    cmd = ['wget', url, '--quiet', '--output-document', target]
-    _clean_check(cmd, target)
-
-
-def has_wget():
-    cmd = ['wget', '--version']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_wget.viable = has_wget
-
-
-def download_file_insecure(url, target):
-    """Use Python to download the file, without connection authentication."""
-    src = urlopen(url)
-    try:
-        # Read all the data in one block.
-        data = src.read()
-    finally:
-        src.close()
-
-    # Write all the data in one block to avoid creating a partial file.
-    with open(target, "wb") as dst:
-        dst.write(data)
-download_file_insecure.viable = lambda: True
-
-
-def get_best_downloader():
-    downloaders = (
-        download_file_powershell,
-        download_file_curl,
-        download_file_wget,
-        download_file_insecure,
-    )
-    viable_downloaders = (dl for dl in downloaders if dl.viable())
-    return next(viable_downloaders, None)
-
-
-def download_setuptools(
-        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=DEFAULT_SAVE_DIR, delay=15,
-        downloader_factory=get_best_downloader):
-    """
-    Download setuptools from a specified location and return its filename.
-
-    `version` should be a valid setuptools version number that is available
-    as an sdist for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-
-    ``downloader_factory`` should be a function taking no arguments and
-    returning a function for downloading a URL to a target.
-    """
-    version = _resolve_version(version)
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    zip_name = "setuptools-%s.zip" % version
-    url = download_base + zip_name
-    saveto = os.path.join(to_dir, zip_name)
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        log.warn("Downloading %s", url)
-        downloader = downloader_factory()
-        downloader(url, saveto)
-    return os.path.realpath(saveto)
-
-
-def _resolve_version(version):
-    """
-    Resolve LATEST version
-    """
-    if version is not LATEST:
-        return version
-
-    meta_url = urljoin(DEFAULT_URL, '/pypi/setuptools/json')
-    resp = urlopen(meta_url)
-    with contextlib.closing(resp):
-        try:
-            charset = resp.info().get_content_charset()
-        except Exception:
-            # Python 2 compat; assume UTF-8
-            charset = 'UTF-8'
-        reader = codecs.getreader(charset)
-        doc = json.load(reader(resp))
-
-    return str(doc['info']['version'])
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the setuptools package.
-
-    Returns list of command line arguments.
-    """
-    return ['--user'] if options.user_install else []
-
-
-def _parse_args():
-    """Parse the command line for options."""
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the setuptools package')
-    parser.add_option(
-        '--insecure', dest='downloader_factory', action='store_const',
-        const=lambda: download_file_insecure, default=get_best_downloader,
-        help='Use internal, non-validating downloader'
-    )
-    parser.add_option(
-        '--version', help="Specify which version to download",
-        default=DEFAULT_VERSION,
-    )
-    parser.add_option(
-        '--to-dir',
-        help="Directory to save (and re-use) package",
-        default=DEFAULT_SAVE_DIR,
-    )
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-
-def _download_args(options):
-    """Return args for download_setuptools function from cmdline args."""
-    return dict(
-        version=options.version,
-        download_base=options.download_base,
-        downloader_factory=options.downloader_factory,
-        to_dir=options.to_dir,
-    )
-
-
-def main():
-    """Install or upgrade setuptools and EasyInstall."""
-    options = _parse_args()
-    archive = download_setuptools(**_download_args(options))
-    return _install(archive, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/manylinux_builder/Dockerfile b/manylinux_builder/Dockerfile
index 3784f04..0866756 100644
--- a/manylinux_builder/Dockerfile
+++ b/manylinux_builder/Dockerfile
@@ -1,6 +1,6 @@
 FROM quay.io/pypa/manylinux1_x86_64
 
-ENV GLPK_VER="4.60"
+ENV GLPK_VER="4.61"
 RUN wget http://ftp.gnu.org/gnu/glpk/glpk-${GLPK_VER}.tar.gz -O - | tar xz
 WORKDIR glpk-${GLPK_VER}
 RUN ./configure && make install
diff --git a/manylinux_builder/build_cobrapy.sh b/manylinux_builder/build_cobrapy.sh
index 8552aa8..6703649 100755
--- a/manylinux_builder/build_cobrapy.sh
+++ b/manylinux_builder/build_cobrapy.sh
@@ -1,7 +1,6 @@
 #!/bin/bash
 
 for PYBIN in /opt/python/*/bin; do
-    ${PYBIN}/pip install pyelftools==0.23
     ${PYBIN}/pip wheel cobra --pre
 done
 
diff --git a/release-notes/0.6.0.md b/release-notes/0.6.0.md
new file mode 100644
index 0000000..7b5fc22
--- /dev/null
+++ b/release-notes/0.6.0.md
@@ -0,0 +1,206 @@
+# Release notes for cobrapy 0.6.0
+
+## Highlights
+
+In this release we have made major changes to pretty much all corners
+of cobrapy and we hope that you will enjoy the new features as much as
+we do, and that any negative impacts on existing workflows will be
+limited.
+
+The major change is the ongoing move away from cobrapy's internal
+solver interfaces to those provided by
+[optlang](https://github.com/biosustain/optlang) which provides a
+single unified interface to glpk, cplex and gurobi enhanced by the
+ability to deal with symbolic expressions. This means formulating
+complex constraints no longer implies defining the necessary matrix
+algebra, but instead simply writing the expression and assigning that
+as an objective to the model.
+
+We feel that this, and the clarified scope and focus attained by
+separating the topic of linear programming (optlang) and metabolic
+flux analysis (cobrapy) to two packages is natural and makes both of
+these tasks less confusing and more fun. We hope that, after trying
+it, you feel the same, and that in any case you let us know what you
+think by
+[raising an issue](https://github.com/opencobra/cobrapy/issues) or
+talking directly to us on [gitter](https://gitter.im/opencobra/cobrapy) or
+[google groups](https://groups.google.com/forum/#!forum/cobra-pie).
+
+## New features
+
+### The optlang solver interface
+
+The main change is the addition of `model.solver` which is the optlang
+interface to the chosen solver (cplex and glpk are currently well
+supported, gurobi interface is at the time of writing mostly working
+but improvements are still expected). The solver interface manages
+variables, constraints and the objective of the model and the task of
+turning these into a model formulation to be optimized by the
+solver. From cobrapy's point-of-view, this means that all aspects
+concerning generating problems, configuring solvers are handed over to
+optlang and consequently the whole `cobra.solver` has been deprecated,
+slated for removal in the next major release of cobrapy.
+
+Importantly, configuring the solver by passing `**solver_args` or
+`solver='solver'` is now instead done by assigning solver to
+`model.solver` and then configuring via `model.solver.configuration`.
+
+Creating new algorithms has been greatly facilitated as it no longer
+requires formulating objectives and constraints by matrix algebra but
+instead directly by expressions, e.g. see the implementation of
+`cobra.flux_analysis.moma.add_moma` and
+`cobra.flux_analysis.loopless.loopless_solution`.
+
+Instead of having only reactions as variables and metabolites as
+constraints, with optlang, cobrapy now supports arbitrary constraints
+and variables and these can be added/removed by `model.add_cons_vars`
+and `model.remove_cons_vars` which take care of adding these to
+`model.problem` which is the optlang's mathematical model associated
+with the cobra model.
+
+Reactions are now modeled by two variables, forward and reverse, and
+these can be seen by accessing `reaction.{forward,reverse}_variable`
+and the combined `reaction.flux_expression`.
+
+Objectives can now easily be made quite advanced by simply crafting
+the right expression and assigning this as usual to `model.objective`,
+see the
+[constraints and objectives notebook](http://cobrapy.readthedocs.io/en/latest/constraints_objectives.html).
+
+### Temporary changes to a model
+
+Models are large complex objects and copying such objects is
+inevitably slow. To avoid that, cobrapy has drawn on the experience
+from the development of cameo to introduce the `HistoryManager` class
+and the concept of models as contexts. Now, most changes that can be
+made to a model such as changing the objective, setting reaction
+bounds, adding and removing reactions, is reversed upon exit when done
+inside a context, see the updated
+[getting started notebook](http://cobrapy.readthedocs.io/en/latest/getting_started.html).
+
+### Improved solution handling
+
+Previously, cobra models lugged around their latest solution to enable
+providing `reaction.{flux,reduced_cost}` (formerly
+`reaction.{x,y}`). This was problematic because if the model had
+changed since last optimization, then this would effectively give the
+wrong result. On top of that, it was not easy to make a change,
+optimize and get values, and then undo that change to the model
+without having to copy the whole model object. To solve this, and many
+similar problem, we have completely refactored `cobra.Solution` so
+that `model.optimize()` now returns a solution and it is the user's
+responsibility to manage this object. `reaction.flux` gets its values
+directly from the `model.problem`. To sugar the new solution class,
+fluxes, reduced costs, and shadow prices are now pandas series! Fluxes
+and reduced costs can be returned as a data frame directly with the
+`to_frame` method.
+
+## Sampling
+
+Cobrapy now has flux sampling supported by
+`cobra.flux_analysis.sampling` see
+[the sampling notebook](http://cobrapy.readthedocs.io/en/latest/sampling.html).
+
+## Loopless models and solutions
+
+Added implementations of
+[CycleFreeFlux](http://dx.doi.org/10.1093/bioinformatics/btv096) and
+the loopless model of
+[Schellenberger et al.](http://dx.doi.org/10.1016/j.bpj.2010.12.3707). See
+the
+[notebook on loopless](http://cobrapy.readthedocs.io/en/latest/loopless.html)
+and [simulating](http://cobrapy.readthedocs.io/en/latest/simulating.html)
+
+## DataFrames as return values
+
+`flux_variability_analysis`, `single_{gene,reaction}_deletion`,
+`cobra.flux_analysis.sampling` and
+`cobra.util.create_stoichiometric_matrix` now return pandas data frames
+instead of nested dicts as these are more convenient and fun to work
+with. Pandas (and numpy) are therefore now hard requirements for
+cobrapy, which should not be a problem for neither linux, windows or
+mac users as there are reliable wheels for these packages now.
+
+### Model medium
+
+`model.medium` now returns a dict of the boundary feeding reactions
+and their bounds, and comes with a matching setter.
+
+### Knocking out genes
+
+Addition of `cobra.core.Gene.knock_out` which can be used to evaluate
+impact of knocking a gene (and all depending reactions).
+
+### Adding boundary reactions
+
+The model class has new method `model.add_boundary` which can be used
+to add sink, exchange or demand reactions with the appropriate bounds
+and prefixes (DM, SK or EX).
+
+### Gapfilling
+
+The `SMILEY` and `growMatch` implementations were refactored to a
+single new function `cobra.flux_analysis.gapfilling.gapfill` which
+handles both use-cases.
+
+### New Output Format in YAML
+
+Models can now be round tripped to/from YAML documents. YAML is a file format
+that is even more legible than JSON. In the scope of cobrapy, YAML output is
+intended for `diff` comparisons between models.
+
+## Fixes
+
+- Handle multiple IDs in Matlab models
+  [#345](https://github.com/opencobra/cobrapy/issues/345)
+- `DictList.query` behavior changed so that attribute is `None` if the
+  search parameter is not a regex or string, to enable
+  `reactions.query(lambda x: x.boundary)`
+- Set charge from notes if not defined elsewhere
+  [#352](https://github.com/opencobra/cobrapy/issues/352)
+- Warnings are no longer issued on package import if soft requirement
+  scipy, python-libsbml is not available.
+
+## Deprecated features
+
+These features are now deprecated and slated for complete removal in
+the next major cobrapy release.
+
+- The whole `cobra.solver` module is now deprecated, see New features.
+- `ArrayBasedModel` / `Model.to_array_based_model` are
+  deprecated. This formulation makes little sense when handing over
+  the matrix algebra to optlang, for the stoichiometry matrix (aka S),
+  see `cobra.util.array.create_stoichiometric_matrix`.
+- `Metabolite.y` in favor of `Metabolite.shadow_price`
+- `Model.add_reaction` in favor of `Model.add_reactions`
+- `Reaction.x` in favor of `Reaction.flux`
+- `Reaction.y` in favor of `Reaction.reduced_cost`
+- `Solution.{x, y, x_dict, y_dict, f}` in favor of `Solution.{fluxes,
+  reduced_costs}`. The setters are also deprecated.
+- `phenotype_phase_plane` in favor of `production_envelope`. The
+  plotting capabilities are deprecated, to be re-implemented somewhere
+  else.
+- `convert_to_irreversible`, `revert_to_reversible`, `canonical_form`
+  deprecated without replacement.
+- `check_reaction_bounds` deprecated without replacement.
+- `optimize_minimal_flux` was renamed to `pfba`
+
+## Backwards incompatible changes
+
+- optknock was completely removed, users are advised to use cameo for
+  this functionality
+- dual_problem was removed
+- `cobra.topology` was removed, possibly to be reintroduced in a
+  different package
+- flux_variability_analysis results must be transformed to have them
+  work as the previous nested dict,
+  i.e. `flux_variability_analysis(model).T` should give behavior as
+  previously.
+- In a major linting effort we renamed capitalized modules to lower-case,
+  e.g. `cobra.core.Model` to `cobra.core.model`. Imports from `cobra`
+  are unchanged though.
+- objective coefficients of reactions can now only be set once the
+  reaction is attached to a model.
+- `Reaction.{x,y}`, `Metabolite.y` are defunct for legacy solvers.
+- `SMILEY` and `growMatch` algorithms are defunct in combination with
+  the legacy solvers.
diff --git a/release-notes/0.6.1.md b/release-notes/0.6.1.md
new file mode 100644
index 0000000..dfa2781
--- /dev/null
+++ b/release-notes/0.6.1.md
@@ -0,0 +1,9 @@
+# Release notes for cobrapy 0.6.1
+
+## Fixes
+
+- Debug `metabolite.add_metabolites` with `combine=False`
+  [#503](https://github.com/opencobra/cobrapy/pull/503)
+- Fix broken printing of metabolites
+  ([#502](https://github.com/opencobra/cobrapy/issues/502)), by
+  removing unnecessary `Frozendict`
diff --git a/release-notes/0.6.2.md b/release-notes/0.6.2.md
new file mode 100644
index 0000000..faee4fe
--- /dev/null
+++ b/release-notes/0.6.2.md
@@ -0,0 +1,29 @@
+# Release notes for cobrapy 0.6.2
+
+## Fixes
+
+- Fix in the ordering and rounding of FVA summary
+  [#525](https://github.com/opencobra/cobrapy/pull/525)
+- Shared memory, improve speed during sampling
+  [#521](https://github.com/opencobra/cobrapy/pull/521)
+- Debug `model.remove_reactions` to properly work with context manager.
+  This led to the deprecation of `reaction.delete`, as it was not compatible
+  with the concept of being able to later revert the change.
+  [#506](https://github.com/opencobra/cobrapy/issues/506),
+  [#508](https://github.com/opencobra/cobrapy/pull/508).
+- Adding two models (`modela + modelb`) again results in a model with
+  the objective set to the sum of the two models objectives
+  [#505](https://github.com/opencobra/cobrapy/issues/505).
+- When adding reactions to a model, the reactions with identifiers
+  identical to those in the model are
+  ignored. [#511](https://github.com/opencobra/cobrapy/issues/511)
+
+## New features
+- `model.merge` can be used to merge two models, more flexibly than
+  the overloaded + and += operators.
+
+## Deprecated features
+
+- `reaction.delete` has been deprecated in favor of `reaction.remove_from_model`
+- overloaded `+` and `+=` for `cobra.Model` are deprecated in favor of
+  `model.merge`
diff --git a/release-notes/0.7.0.md b/release-notes/0.7.0.md
new file mode 100644
index 0000000..aa0a67e
--- /dev/null
+++ b/release-notes/0.7.0.md
@@ -0,0 +1,23 @@
+# Release notes for cobrapy 0.7.0
+
+## Fixes
+
+- `cobra.flux_analysis.reaction.assess`
+  [was broken](https://github.com/opencobra/cobrapy/issues/537)
+  following the release of 0.6.0 and has now been fixed (and now with
+  unit tests).
+- `production_envelope` failed when the model's C-source was formulated
+  as `-> x` instead of `x <-`. Fixed; also added an option to guess the
+  C-source by taking the medium reaction with the highest input C flux.
+- `model_to_pymatbridge` needs scipy and that's correctly handled now.
+
+## New features
+- `flux_variability_analysis` now has the `pfba_factor` parameter
+  which enables the inclusion of a constraint on the max sum of
+  absolute fluxes when doing FVA.
+
+## Deprecated features
+
+- `cobra.flux_analysis.reaction.assess_{precursors,products}` were
+  essentially copies of each other and have been merged to
+  `cobra.flux_analysis.reaction.assess_component`
diff --git a/release-notes/0.8.0.md b/release-notes/0.8.0.md
new file mode 100644
index 0000000..a02141a
--- /dev/null
+++ b/release-notes/0.8.0.md
@@ -0,0 +1,27 @@
+# Release notes for cobrapy 0.8.0
+
+## Fixes
+
+- Tests no longer generates warnings about using deprecated functions.
+- Gapfilling integrality thresholds now supported for all optlang solvers.
+
+## New features
+
+- `Model.slim_optimize()` can be used to perform optimization without
+  creating a solution. This can lead to a significant speedup compared
+  to `Model.optimize` when repeatedly doing optimizations and only
+  using the objective value, as it avoids the need to fetch all values
+  from the solver object.
+- solution, model, metabolite and reaction now have html
+  representation so they give more informative prints in jupyter
+  notebooks.
+- New convenience functions `cobra.flux_analysis.find_essential_genes` and
+  `cobra.flux_analysis.find_essential_reactions`.
+- `Model.optimize` has a new parameter `raise_error` that enables the
+  option of raising an exception if no feasible solution could be found.
+- `str(reaction)` now gives the more useful reaction id and the
+  reaction string.
+
+## Deprecated features
+
+- `str(reaction)` no longer gives `reaction.id`.
diff --git a/release-notes/0.8.1.md b/release-notes/0.8.1.md
new file mode 100644
index 0000000..1fe32af
--- /dev/null
+++ b/release-notes/0.8.1.md
@@ -0,0 +1,14 @@
+# Release notes for cobrapy 0.8.1
+
+## Fixes
+
+- Fix bug related to inhomogeneous sampling
+  [#558](https://github.com/opencobra/cobrapy/issues/558).  During
+  flux sampling, store the nullspace rather than the entire projection
+  (N * N.T) which reduces the memory footprint since it uses memory in
+  the order of `2 * n_reaction * n_metabolites` instead of `4 *
+  n_reactions^2` (no change in
+  speed). [PR #556](https://github.com/opencobra/cobrapy/pull/556)
+
+
+
diff --git a/release-notes/0.8.2.md b/release-notes/0.8.2.md
new file mode 100644
index 0000000..d2ecece
--- /dev/null
+++ b/release-notes/0.8.2.md
@@ -0,0 +1,17 @@
+# Release notes for cobrapy 0.8.2
+
+## Fixes
+
+- The Solution class no longer contains links to the progenitor
+  model's reactions and metabolites.
+- Guarantee that sampler._reproject always returns a feasible point
+  and will not attempt to reproject already feasible
+  points. [#564](https://github.com/opencobra/cobrapy/pull/564)
+- `Model.summary` no longer fails when calling after the model has
+  changed. Fixed by letting the summary function re-compute a solution
+  (default) or letting user supply a prior computed solution
+  object. [#566](https://github.com/opencobra/cobrapy/pull/566)
+- Metabolites must now have valid identifiers before being added to a
+  model or `ValueError` is raised.
+- Fix use of underscores in key/value pairs in legacy sbml
+  notes. [#547](https://github.com/opencobra/cobrapy/issues/547)
diff --git a/release-notes/next-release.md b/release-notes/next-release.md
new file mode 100644
index 0000000..c622d35
--- /dev/null
+++ b/release-notes/next-release.md
@@ -0,0 +1,7 @@
+# Release notes for cobrapy x.y.z
+
+## Fixes
+
+## New features
+
+## Deprecated features
diff --git a/scripts/compare-benchmark.py b/scripts/compare-benchmark.py
new file mode 100644
index 0000000..c203dc4
--- /dev/null
+++ b/scripts/compare-benchmark.py
@@ -0,0 +1,35 @@
+import json
+import pandas as pd
+import argparse
+import re
+from os.path import basename
+
+
+def benchmark_to_df(json_file):
+    with open(json_file) as jf:
+        content = json.load(jf)
+        df = pd.DataFrame(columns=("test", "time [ms] "))
+        for b in content["benchmarks"]:
+            df = df.append({"test": b["name"],
+                            "time [ms] ": b["stats"]["mean"] * 1000.0},
+                           ignore_index=True)
+        return df
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="""
+    compare cobrapy benchmarks.""")
+    parser.add_argument('first', help='first branch to compare')
+    parser.add_argument('second', help='second branch to compare')
+    args = parser.parse_args()
+
+    first = benchmark_to_df(args.first)
+    second = benchmark_to_df(args.second)
+    re_name = '^[0-9]+_(.+).json$'
+    first_name = re.findall(re_name, basename(args.first))[0]
+    second_name = re.findall(re_name, basename(args.second))[0]
+    both = pd.merge(first, second, how="inner", on="test",
+                    suffixes=(first_name, second_name))
+    both["fraction"] = both.iloc[:, 2] / both.iloc[:, 1]
+    both.sort_values(by="fraction")
+    print(both)
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
index fcebd2f..7607376 100755
--- a/scripts/deploy.sh
+++ b/scripts/deploy.sh
@@ -1,7 +1,7 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
-
-echo -e " starting deploy for branch ${TRAVIS_BRANCH} .."
-
-pip install twine
-twine upload --skip-existing --username "${PYPI_USERNAME}" --password "${PYPI_PASSWORD}" ${TRAVIS_BUILD_DIR}/wheelhouse/*
+if [[ -n "${MB_PYTHON_VERSION}" ]]; then
+	echo -e " starting deploy for branch ${TRAVIS_BRANCH} .."
+	pip install twine
+	twine upload --skip-existing --username "${PYPI_USERNAME}" --password "${PYPI_PASSWORD}" ${TRAVIS_BUILD_DIR}/wheelhouse/*
+fi;
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..6c7b437
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,54 @@
+[bumpversion]
+current_version = 0.8.2
+commit = True
+tag = True
+parse = (?P<major>\d+)
+	\.(?P<minor>\d+)
+	\.(?P<patch>\d+)
+	(?P<release>[a]*)(?P<num>\d*)
+serialize = 
+	{major}.{minor}.{patch}{release}{num}
+	{major}.{minor}.{patch}
+tag_name = {new_version}
+
+[bumpversion:part:release]
+optional_value = placeholder
+first_value = placeholder
+values = 
+	placeholder
+	a
+
+[bumpversion:file:setup.py]
+search = version="{current_version}"
+replace = version="{new_version}"
+
+[bumpversion:file:cobra/__init__.py]
+search = __version__ = "{current_version}"
+replace = __version__ = "{new_version}"
+
+[wheel]
+universal = 1
+
+[flake8]
+max-line-length = 79
+exclude = __init__.py,docs
+
+[pydocstyle]
+match_dir = cobra
+convention = numpy
+match = (?!test_).*\.py
+
+[aliases]
+test = pytest
+
+[tool:pytest]
+testpaths = cobra/test
+
+[isort]
+not_skip = __init__.py
+indent = 4
+line_length = 79
+multi_line_output = 4
+known_third_party = future,six
+known_first_party = cobra
+
diff --git a/setup.py b/setup.py
index 3efc7af..be73b98 100644
--- a/setup.py
+++ b/setup.py
@@ -1,36 +1,10 @@
-from os.path import isfile, abspath, dirname, join
-from sys import argv, path
+# -*- coding: utf-8 -*-
 
-# To temporarily modify sys.path
-SETUP_DIR = abspath(dirname(__file__))
-
-try:
-    from setuptools import setup, find_packages
-except ImportError:
-    path.insert(0, SETUP_DIR)
-    import ez_setup
-    path.pop(0)
-    ez_setup.use_setuptools()
-    from setuptools import setup, find_packages
+from os.path import abspath, dirname, isfile, join
+from sys import argv
+from warnings import warn
 
-
-# for running parallel tests due to a bug in python 2.7.3
-# http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing
-except:
-    None
-
-# import version to get the version string
-path.insert(0, join(SETUP_DIR, "cobra"))
-from version import get_version, update_release_version
-path.pop(0)
-version = get_version(pep440=True)
-
-# If building something for distribution, ensure the VERSION
-# file is up to date
-if "sdist" in argv or "bdist_wheel" in argv:
-    update_release_version()
+from setuptools import setup, find_packages
 
 # cython is optional for building. The c file can be used directly. However,
 # for certain functions, the c file must be generated, which requires cython.
@@ -132,12 +106,16 @@ except Exception as e:
     print('Could not build CGLPK: {}'.format(e))
     ext_modules = None
 
+setup_requirements = []
+# prevent pytest-runner from being installed on every invocation
+if {'pytest', 'test', 'ptr'}.intersection(argv):
+    setup_requirements.append("pytest-runner")
+
 extras = {
     'matlab': ["pymatbridge"],
     'sbml': ["python-libsbml", "lxml"],
-    'array': ["numpy>=1.6", "scipy>=0.11.0"],
-    'test': ["pytest", "pytest-benchmark"],
-    'display': ["matplotlib", "palettable", "pandas>=0.17.0", "tabulate"]
+    'array': ["scipy>=0.11.0"],
+    'display': ["matplotlib", "palettable"]
 }
 
 all_extras = {'Cython>=0.21'}
@@ -166,22 +144,22 @@ except:
 
 setup(
     name="cobra",
-    version=version,
-    packages=find_packages(exclude=['cobra.oven', 'cobra.oven*']),
-    setup_requires=[],
-    install_requires=["six"],
-    tests_require=["jsonschema > 2.5"],
+    version="0.8.2",
+    packages=find_packages(),
+    setup_requires=setup_requirements,
+    install_requires=["future", "swiglpk", "optlang>=1.2.1",
+                      "ruamel.yaml<0.15",
+                      "pandas>=0.17.0", "numpy>=1.6", "tabulate"],
+    tests_require=["jsonschema > 2.5", "pytest", "pytest-benchmark"],
     extras_require=extras,
     ext_modules=ext_modules,
 
     package_data={
          '': ['test/data/*',
-              'VERSION',
               'mlab/matlab_scripts/*m']},
 
-    author="Daniel Robert Hyduke <danielhyduke at gmail.com>, "
-    "Ali Ebrahim <aebrahim at ucsd.edu>",
-    author_email="aebrahim at ucsd.edu",
+    author="The cobrapy core team",
+    author_email="cobra-pie at googlegroups.com",
     description="COBRApy is a package for constraints-based modeling of "
     "biological networks",
     license="LGPL/GPL v2+",
@@ -202,6 +180,7 @@ setup(
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
         'Programming Language :: Cython',
         'Programming Language :: Python :: Implementation :: CPython',
         'Topic :: Scientific/Engineering',
diff --git a/tox.ini b/tox.ini
index 68ef7fa..0b4e78e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,20 @@
 [tox]
-envlist = pep8, py27, py35
-
-[testenv:pep8]
-basepython=python
-deps=pep8
-commands=pep8 cobra --exclude=oven,solvers,sbml.py
+envlist = pep8, py27, py35, py36
 
 [testenv]
+passenv =
+    CI
+    TRAVIS
+    TRAVIS_*
+deps=
+    pytest
+    pytest-benchmark
+commands =
+    pytest --benchmark-skip cobra
+
+[testenv:pep8]
+skip_install = True
+deps =
+    pep8
 commands =
-    pip install -U pip
-    pytest --benchmark-skip
+    pep8 --exclude=solvers --show-source cobra

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-cobra.git



More information about the debian-med-commit mailing list