[lmfit-py] 02/04: Imported Upstream version 0.8.0

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Wed Oct 8 04:22:05 UTC 2014


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to branch master
in repository lmfit-py.

commit 51e44e224e0825f7b1d97f81870dab2cde0ed29c
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Tue Oct 7 21:02:30 2014 +0200

    Imported Upstream version 0.8.0
---
 .gitignore                                         |   2 +-
 LICENSE                                            |  11 +-
 README.md => README.rst                            |  19 +-
 THANKS.txt                                         |  12 +-
 doc/Makefile                                       |  14 +-
 doc/_images/conf_interval1.png                     | Bin 21741 -> 21021 bytes
 doc/_images/conf_interval1a.png                    | Bin 19853 -> 18892 bytes
 doc/_images/conf_interval2.png                     | Bin 16793 -> 16756 bytes
 doc/_images/model_fit2a.png                        | Bin 0 -> 28079 bytes
 doc/_images/models_peak1.png                       | Bin 161561 -> 32134 bytes
 doc/_images/models_peak2.png                       | Bin 175732 -> 35125 bytes
 doc/_images/models_peak3.png                       | Bin 159332 -> 33508 bytes
 doc/_images/models_peak4.png                       | Bin 164721 -> 33004 bytes
 doc/_templates/indexsidebar.html                   |  10 +
 doc/_templates/layout.html                         |  58 --
 doc/builtin_models.rst                             | 325 ++++----
 doc/conf.py                                        |  16 +-
 doc/confidence.rst                                 | 117 +--
 doc/constraints.rst                                |  36 +-
 doc/extensions.py                                  |  10 +
 doc/fitting.rst                                    | 158 ++--
 doc/index.rst                                      |  77 +-
 doc/installation.rst                               |  58 +-
 doc/model.rst                                      | 824 +++++++++++++++------
 doc/parameters.rst                                 | 129 +++-
 doc/sphinx/ext_mathjax.py                          |  10 +
 doc/sphinx/ext_pngmath.py                          |  10 +
 doc/sphinx/mathjax/conf.py                         | 180 -----
 doc/sphinx/pngmath/conf.py                         | 180 -----
 doc/sphinx/theme/lmfitdoc/layout.html              |  57 ++
 examples/{example_ci.py => confidence_interval.py} |   0
 .../{example_ci2.py => confidence_interval2.py}    |   0
 examples/doc_basic_valuesdict.py                   |  45 ++
 examples/doc_confidence1.py                        |  24 +
 examples/doc_confidence2.py                        |  61 ++
 examples/doc_model1.py                             |   2 +-
 examples/doc_model2.py                             |  11 +-
 examples/doc_model_with_iter_callback.py           |  43 ++
 examples/doc_nistgauss.py                          |  31 +-
 examples/doc_nistgauss2.py                         |  12 +-
 examples/doc_peakmodels.py                         |  37 +
 examples/doc_stepmodel.py                          |  19 +-
 examples/doc_withreport.py                         |  50 ++
 examples/example_peakmodel.py                      |  33 -
 examples/models_doc1.py                            |  47 --
 lmfit/__init__.py                                  |  57 +-
 lmfit/lineshapes.py                                |  34 +-
 lmfit/minimizer.py                                 |  82 +-
 lmfit/model.py                                     | 564 ++++++++++----
 lmfit/models.py                                    | 350 ++++-----
 lmfit/old_models1d.py                              | 400 ----------
 lmfit/parameter.py                                 |  45 +-
 lmfit/printfuncs.py                                |  48 +-
 lmfit/ui/__init__.py                               |  42 ++
 lmfit/ui/basefitter.py                             | 320 ++++++++
 lmfit/ui/ipy_fitter.py                             | 262 +++++++
 lmfit/wrap.py                                      | 137 ----
 requirements.txt                                   |   3 +-
 setup.py                                           |   2 +-
 tests/test_copy_params.py                          |  36 +
 tests/test_model.py                                | 331 ++++++---
 tests/test_stepmodel.py                            |  44 +-
 tests/test_wrap_function.py                        |  31 -
 63 files changed, 3256 insertions(+), 2260 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5750c74..bee7276 100755
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,4 @@ build
 dist
 lmfit.egg-info
 sandbox/
-
+*.swp
diff --git a/LICENSE b/LICENSE
index 6c73f16..4511525 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2,15 +2,17 @@ Copyright, Licensing, and Re-distribution
 -----------------------------------------
 
 The LMFIT-py code is distribution under the following license:
-  
-  Copyright (c) 2012 Matthew Newville, The University of Chicago
-                     Till Stensitzki, Freie Universitat Berlin'
+
+  Copyright (c) 2014 Matthew Newville, The University of Chicago
+                     Till Stensitzki, Freie Universitat Berlin
+                     Daniel B. Allen, Johns Hopkins University
+                     Antonino Ingargiola, University of California, Los Angeles
 
   Permission to use and redistribute the source code or binary forms of this
   software and its documentation, with or without modification is hereby
   granted provided that the above notice of copyright, these terms of use,
   and the disclaimer of warranty below appear in the source code and
-  documentation, and that none of the names of above institutions or 
+  documentation, and that none of the names of above institutions or
   authors appear in advertising or endorsement of works derived from this
   software without specific prior written permission from all parties.
 
@@ -21,4 +23,3 @@ The LMFIT-py code is distribution under the following license:
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THIS SOFTWARE.
-
diff --git a/README.md b/README.rst
similarity index 82%
rename from README.md
rename to README.rst
index eacaee5..0175924 100644
--- a/README.md
+++ b/README.rst
@@ -1,7 +1,11 @@
 LMfit-py
 ========
 
-[![build status](https://travis-ci.org/lmfit/lmfit-py.png?branch=master)](https://travis-ci.org/lmfit/lmfit-py)
+.. image:: https://travis-ci.org/lmfit/lmfit-py.png
+   :target: https://travis-ci.org/lmfit/lmfit-py
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.png
+   :target: http://dx.doi.org/??/zenodo.?
 
 LMfit-py provides a Least-Squares Minimization routine and class
 with a simple, flexible approach to parameterizing a model for
@@ -11,14 +15,14 @@ addition, parameters can be constrained as a simple mathematical
 expression of other Parameters.
 
 To do this, the programmer defines a Parameters object, an enhanced
-dictionary, containing named parameters:
+dictionary, containing named parameters::
 
     fit_params = Parameters()
     fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000)
     fit_params['cen'] = Parameter(value=40.0, vary=False),
     fit_params['wid'] = Parameter(value=4, min=0)}
 
-or using the equivalent
+or using the equivalent:
 
     fit_params = Parameters()
     fit_params.add('amp', value=1.2, min=0.1, max=1000)
@@ -27,7 +31,7 @@ or using the equivalent
 
 The programmer will also write a function to be minimized (in the
 least-squares sense) with its first argument being this Parameters object,
-and additional positional and keyword arguments as desired:
+and additional positional and keyword arguments as desired::
 
     def myfunc(params, x, data, someflag=True):
         amp = params['amp'].value
@@ -49,7 +53,7 @@ parameters are adjuested and which are fixed happens at run-time, so that
 changing what is varied and what constraints are placed on the parameters
 can easily be modified by the consumer in real-time data analysis.
 
-To perform the fit, the user calls
+To perform the fit, the user calls::
 
     result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
 
@@ -60,6 +64,5 @@ statistics and information.
 
 By default, the underlying fit algorithm is the Levenberg-Marquart
 algorithm with numerically-calculated derivatives from MINPACK's lmdif
-function, as used by scipy.optimize.leastsq.  Other solvers (currently
-Simulated Annealing and L-BFGS-B) are also available, though slightly less
-well-tested and supported.
+function, as used by scipy.optimize.leastsq.  Other solvers (Nelder-Mead,
+etc) are also available, though slightly less well-tested and supported.
diff --git a/THANKS.txt b/THANKS.txt
index f03a5a4..70bf7e9 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,15 +1,17 @@
 Many people have contributed to lmfit.
 
 Matthew Newville wrote the original implementation.
-Till Stensitzki wrote the improved estimates of confidence intervals,
-     and contributed many tests, bug fixes, and documentation.
-Daniel B. Allan wrote much of the high level Models, and many
+Till Stensitzki wrote the improved estimates of confidence intervals,  and
+     contributed many tests, bug fixes, and documentation.
+Daniel B. Allan wrote much of the high level Model code, and many
      improvements to the testing and documentation.
+Antonino Ingargiola wrote much of the high level Model code and
+     provided many bug fixes.
 J. J. Helmus wrote the MINUT bounds for leastsq, originally in
      leastsqbounds.py, and ported to lmfit.
 E. O. Le Bigot wrote the uncertainties package, a version of which is
-     used here.
+     used in lmfit.
 
 Additional patches, bug fixes, and suggestions have come from
   Christohp Deil, Francois Boulogne, Colin Brosseau, nmearl,
-  Gustavo Pasquevich, and Ben Gamari
+  Gustavo Pasquevich, LiCode, and Ben Gamari
diff --git a/doc/Makefile b/doc/Makefile
index d432b5d..1c72ec9 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -6,8 +6,6 @@ SPHINXOPTS    =
 SPHINXBUILD   = sphinx-build
 PAPER         =
 BUILDDIR      = _build
-JAXMATHCONF   = sphinx/mathjax/conf.py
-PNGMATHCONF   = sphinx/pngmath/conf.py
 INSTALLDIR = /home/newville/public_html/lmfit/
 
 
@@ -20,25 +18,19 @@ ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
 .PHONY: all install pdf
 
 html:
-	cp conf.py SAVEconf.py
-	cp $(JAXMATHCONF) conf.py
+	cp sphinx/ext_mathjax.py extensions.py
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	cp SAVEconf.py conf.py
 	@echo
 	@echo "html build finished: $(BUILDDIR)/html."
 
 htmlzip: html
-	cp conf.py SAVEconf.py
-	cp $(PNGMATHCONF) conf.py
+	cp sphinx/ext_pngmath.py extensions.py
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
-	cp SAVEconf.py conf.py
 	cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
 
 epub:
-	cp conf.py SAVEconf.py
-	cp $(PNGMATHCONF) conf.py
+	cp sphinx/ext_pngmath.py extensions.py
 	$(SPHINXBUILD) -b epub  $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	cp SAVEconf.py conf.py
 	cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
 
 pdf: latex
diff --git a/doc/_images/conf_interval1.png b/doc/_images/conf_interval1.png
index b2c8432..1115595 100644
Binary files a/doc/_images/conf_interval1.png and b/doc/_images/conf_interval1.png differ
diff --git a/doc/_images/conf_interval1a.png b/doc/_images/conf_interval1a.png
index 7e411fd..a5e30a2 100644
Binary files a/doc/_images/conf_interval1a.png and b/doc/_images/conf_interval1a.png differ
diff --git a/doc/_images/conf_interval2.png b/doc/_images/conf_interval2.png
index 98c3a46..4278493 100644
Binary files a/doc/_images/conf_interval2.png and b/doc/_images/conf_interval2.png differ
diff --git a/doc/_images/model_fit2a.png b/doc/_images/model_fit2a.png
new file mode 100644
index 0000000..a6b2458
Binary files /dev/null and b/doc/_images/model_fit2a.png differ
diff --git a/doc/_images/models_peak1.png b/doc/_images/models_peak1.png
index 79b49cf..c74adba 100644
Binary files a/doc/_images/models_peak1.png and b/doc/_images/models_peak1.png differ
diff --git a/doc/_images/models_peak2.png b/doc/_images/models_peak2.png
index 9fc6000..05d5b87 100644
Binary files a/doc/_images/models_peak2.png and b/doc/_images/models_peak2.png differ
diff --git a/doc/_images/models_peak3.png b/doc/_images/models_peak3.png
index 6b04fcf..706514d 100644
Binary files a/doc/_images/models_peak3.png and b/doc/_images/models_peak3.png differ
diff --git a/doc/_images/models_peak4.png b/doc/_images/models_peak4.png
index e1a732c..ce9ceb8 100644
Binary files a/doc/_images/models_peak4.png and b/doc/_images/models_peak4.png differ
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index 960a7b3..d0f0ac2 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -6,10 +6,20 @@
 <p>Development version: <br> 
     <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
 
+<h3>Support and Feedback</h3>
+
+  <a href="https://groups.google.com/group/lmfit-py"> Mailing List</a> 
+
+<br>
+  <a href="https://github.com/lmfit/lmfit-py/issues"> Issue Tracker</a>
+
 <h3>Off-line Documentation</h3>
+
 [<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
 |<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
 |<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
 ]
+
+
 <hr> 
 <p>
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
deleted file mode 100644
index 92a1cb0..0000000
--- a/doc/_templates/layout.html
+++ /dev/null
@@ -1,58 +0,0 @@
-{% extends "!layout.html" %}
-
-{%- block extrahead %}
-  <script type="text/x-mathjax-config">
-     MathJax.Hub.Config({
-        "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
-        "HTML-CSS": {scale: 90}
-  });</script>
-{% endblock %}
-
-
-
-{% block rootrellink %}
-   <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
-   <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
-   <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
-   <li><a href="{{ pathto('model') }}"> model</a>|</li>
-   <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
-   <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
-   <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
-   <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
-{% endblock %}
-
-{% block relbar1 %}
-<div>
-<table border=0>
-  <tr><td></td><td width=75% padding=5 align=left>
-       <a href="index.html" style="color: #157"> <font size=+2>LMFIT</font></a>
-     </td><td></td>
-     <td width=8% align=left>
-         <a href="contents.html" style="color: #882222">
-         <font size+=1>Contents</font></a> </td>
-     <td width=8% align=left>
-          <a href="installation.html" style="color: #882222">
-          <font size+=1>Download</font></a></td>
-     <td width=8% align=left>
-        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
-         <font size+=1>Develop</font></a></td>
-  </tr>
-  <tr><td></td><td width=75% padding=5 align=left>
-        <a href="index.html" style="color: #157"> <font size=+1>
-	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
-     </td><td></td>
-     <td width=8% align=left>
-         <a href="intro.html" style="color: #882222">
-         <font size+=1>Introduction</font></a> </td>
-     <td width=8% align=left>
-         <a href="parameters.html" style="color: #882222">
-         <font size+=1>Parameters</font></a> </td>
-     <td width=8% align=left>
-         <a href="model.html" style="color: #882222">
-         <font size+=1>Models</font></a> </td>
-
-  </tr>
-</table>
-</div>
-{{ super() }}
-{% endblock %}
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 9185311..a6d310e 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -1,8 +1,11 @@
 .. _builtin_models_chapter:
 
-=================================================
-Built-in Fitting Models in the :mod:`models`
-=================================================
+=====================================================
+Built-in Fitting Models in the :mod:`models` module
+=====================================================
+
+.. module:: models
+
 
 Lmfit provides several builtin fitting models in the :mod:`models` module.
 These pre-defined models each subclass from the :class:`Model` class of the
@@ -11,12 +14,10 @@ Gaussians, Lorentzian, and Exponentials that are used in a wide range of
 scientific domains.  In fact, all the models are all based on simple, plain
 python functions defined in the :mod:`lineshapes` module.  In addition to
 wrapping a function into a :class:`Model`, these models also provide a
-:meth:`guess_starting_values` method that is intended to give a reasonable
+:meth:`guess` method that is intended to give a reasonable
 set of starting values from a data array that closely approximates the
 data to be fit.
 
-.. module:: models
-
 As shown in the previous chapter, a key feature of the :class:`Model` class
 is that models can easily be combined to give a composite
 :class:`Model`. Thus while some of the models listed here may seem pretty
@@ -29,9 +30,6 @@ example,  a Lorentzian plus a linear background might be represented as::
     >>> background  = LinearModel()
     >>> model = peak + background
 
-
-
-
 All the models listed below are one dimensional, with an independent
 variable named ``x``.  Many of these models represent a function with a
 distinct peak, and so share common features.  To maintain uniformity,
@@ -49,7 +47,9 @@ Peak-like models
 
 There are many peak-like models available.  These include
 :class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
-some less commonly used variations.
+some less commonly used variations.  The :meth:`guess`
+methods for all of these make a fairly crude guess for the value of
+``amplitude``, but also set a lower bound of 0 on the value of ``sigma``.
 
 :class:`GaussianModel`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -144,7 +144,7 @@ in
   \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big] + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
 
 
-The :meth:`guess_starting_values` function always gives a starting
+The :meth:`guess` function always gives a starting
 value for ``fraction`` of 0.5
 
 :class:`Pearson7Model`
@@ -154,24 +154,17 @@ value for ``fraction`` of 0.5
 
 A model based on a `Pearson VII distribution
 <http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
-This is another Voigt-like distribution function.  It has the usual
+This is a Lorentzian-like distribution function.  It has the usual
 parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``exponent`` (:math:`p`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, p) = \frac{sA}{\big\{[1 + (\frac{x-\mu}{\sigma})^2] (2^{1/p} -1)  \big\}^p}
-
-where
+``sigma`` (:math:`\sigma`), and also an ``exponent`` (:math:`m`) in
 
 .. math::
 
-    s = \frac{\Gamma(p) \sqrt{2^{1/p} -1}}{ \sigma\sqrt{\pi}\,\Gamma(p-1/2)}
-
-where :math:`\Gamma(x)` is the gamma function.
+    f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2}  \bigr]^{-m}
 
-The :meth:`guess_starting_values` function always gives a starting
-value for ``exponent`` of 0.5.
+where :math:`\beta` is the beta function (see :func:`scipy.special.beta` in
+:mod:`scipy.special`).  The :meth:`guess` function always
+gives a starting value for ``exponent`` of 1.5.
 
 :class:`StudentsTModel`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -251,11 +244,32 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
 
     f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
     \exp\bigl[\gamma({\mu - x  + \sigma^2/2})\bigr]
-    {\operatorname{erfc}}\bigl[\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\bigr]
+    {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
 
 
 where :func:`erfc` is the complimentary error function.
 
+:class:`SkewedGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: SkewedGaussianModel()
+
+A variation of the above model, this is a `Skewed normal distribution
+<http://en.wikipedia.org/wiki/Skew_normal_distribution>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
+  e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
+      {\operatorname{erf}}\bigl[
+         \frac{\gamma(x-\mu)}{\sigma\sqrt{2\pi}}
+     \bigr] \Bigr\}
+
+
+where :func:`erf` is the error function.
+
 
 :class:`DonaichModel`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -343,7 +357,8 @@ with parameters ``a``, ``b``, and ``c``.
     f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i  x^i
 
 with parameters ``c0``, ``c1``, ..., ``c7``.  The supplied ``degree``
-will specify how many of these are actual variable parameters.
+will specify how many of these are actual variable parameters.  This uses
+:func:`numpy.polyval` for its calculation of the polynomial.
 
 
 
@@ -438,10 +453,10 @@ form:
 
 
 
-Example 1: Fit Peaked data to Gaussian or Voigt profiles
-------------------------------------------------------------------
+Example 1: Fit Peaked data to Gaussian, Lorentzian, and  Voigt profiles
+------------------------------------------------------------------------
 
-Here, we will fit data to two similar lineshapes, in order to decide which
+Here, we will fit data to three similar lineshapes, in order to decide which
 might be the better model.  We will start with a Gaussian profile, as in
 the previous chapter, but use the built-in :class:`GaussianModel` instead
 of one we write ourselves.  This is a slightly different version from the
@@ -456,31 +471,34 @@ built-in default values.  So, we'll simply use::
     y = data[:, 1]
 
     mod = GaussianModel()
-    mod.guess_starting_values(y, x=x)
-    out  = mod.fit(y, x=x)
-    print(mod.fit_report(min_correl=0.25))
+    pars = mod.guess(y, x=x)
+    out  = mod.fit(y, pars, x=x)
+    print(out.fit_report(min_correl=0.25))
 
 which prints out the results::
 
+    [[Model]]
+        gaussian
     [[Fit Statistics]]
-        # function evals   = 25
+        # function evals   = 21
         # data points      = 401
         # variables        = 3
         chi-square         = 29.994
         reduced chi-square = 0.075
     [[Variables]]
-        amplitude:     30.31352 +/- 0.1571252 (0.52%) initial =  21.54192
-        center:        9.242771 +/- 0.00737481 (0.08%) initial =  9.25
-        fwhm:          2.901562 +/- 0.01736635 (0.60%) == '2.354820*sigma'
-        sigma:         1.23218 +/- 0.00737481 (0.60%) initial =  1.35
+        amplitude:   30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+        center:      9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
+        fwhm:        2.90156963 +/- 0.017366 (0.60%)  == '2.3548200*sigma'
+        sigma:       1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
     [[Correlations]] (unreported correlations are <  0.250)
         C(amplitude, sigma)          =  0.577
 
-We see a few interesting differences from the results of the previous
-chapter.  First, the parameter names are longer.  Second, there is a
-``fwhm``, defined as :math:`\sim 2.355\sigma`.  And third, the automated
-initial guesses are pretty good.  A plot of the fit shows not such a great
-fit:
+
+[We see a few interesting differences from the results of the previous
+ chapter. First, the parameter names are longer. Second, there is a
+ ``fwhm`` parameter, defined as :math:`\sim 2.355\sigma`. And third, the
+ automated initial guesses are pretty good. A plot of the fit shows not
+ such a great fit:
 
 .. _figA1:
 
@@ -500,55 +518,60 @@ Perhaps a Lorentzian would be better?  To do this, we simply replace
 
     from lmfit.models import LorentzianModel
     mod = LorentzianModel()
-    mod.guess_starting_values(y, x=x)
-    out  = mod.fit(y, x=x)
-    print(mod.fit_report(min_correl=0.25))
+    pars = mod.guess(y, x=x)
+    out  = mod.fit(y, pars, x=x)
+    print(out.fit_report(min_correl=0.25))
 
-The results, or course, are worse::
+Predictably, the first thing we try gives results that are worse::
 
+    [[Model]]
+        lorentzian
     [[Fit Statistics]]
-        # function evals   = 29
+        # function evals   = 25
         # data points      = 401
         # variables        = 3
         chi-square         = 53.754
         reduced chi-square = 0.135
     [[Variables]]
-        amplitude:     38.97278 +/- 0.3138612 (0.81%) initial =  21.54192
-        center:        9.244389 +/- 0.009276152 (0.10%) initial =  9.25
-        fwhm:          2.30968 +/- 0.02631297 (1.14%) == '2.0000000*sigma'
-        sigma:         1.15484 +/- 0.01315648 (1.14%) initial =  1.35
+        amplitude:   38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+        center:      9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
+        fwhm:        2.30969034 +/- 0.026312 (1.14%)  == '2.0000000*sigma'
+        sigma:       1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
     [[Correlations]] (unreported correlations are <  0.250)
         C(amplitude, sigma)          =  0.709
 
 
-with the plot shown in the figure above.
+with the plot shown on the right in the figure above.
 
 A Voigt model does a better job.  Using :class:`VoigtModel`, this is
 as simple as::
 
-    from lmfit.models import LorentzianModel
-    mod = LorentzianModel()
-    mod.guess_starting_values(y, x=x)
-    out  = mod.fit(y, x=x)
-    print(mod.fit_report(min_correl=0.25))
+    from lmfit.models import VoigtModel
+    mod = VoigtModel()
+    pars = mod.guess(y, x=x)
+    out  = mod.fit(y, pars, x=x)
+    print(out.fit_report(min_correl=0.25))
 
 which gives::
 
+    [[Model]]
+        voigt
     [[Fit Statistics]]
-        # function evals   = 30
+        # function evals   = 17
         # data points      = 401
         # variables        = 3
         chi-square         = 14.545
         reduced chi-square = 0.037
     [[Variables]]
-        amplitude:     35.75536 +/- 0.1386167 (0.39%) initial =  21.54192
-        center:        9.244111 +/- 0.005055079 (0.05%) initial =  9.25
-        fwhm:          2.629512 +/- 0.01326999 (0.50%) == '3.6013100*sigma'
-        gamma:         0.7301542 +/- 0.003684769 (0.50%) == 'sigma'
-        sigma:         0.7301542 +/- 0.003684769 (0.50%) initial =  1.35
+        amplitude:   35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+        center:      9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
+        fwhm:        2.62951718 +/- 0.013269 (0.50%)  == '3.6013100*sigma'
+        gamma:       0.73015574 +/- 0.003684 (0.50%)  == 'sigma'
+        sigma:       0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
     [[Correlations]] (unreported correlations are <  0.250)
         C(amplitude, sigma)          =  0.651
 
+
 with the much better value for :math:`\chi^2` and the obviously better
 match to the data as seen in the figure below (left).
 
@@ -572,48 +595,52 @@ the ``gamma`` parameter from a constrained expression and give it a
 starting value::
 
     mod = VoigtModel()
-    mod.guess_starting_values(y, x=x)
-    mod.params['gamma'].expr  = None
-    mod.params['gamma'].value = 0.7
+    pars = mod.guess(y, x=x)
+    pars['gamma'].set(value=0.7, vary=True, expr='')
 
-    out  = mod.fit(y, x=x)
-    print(mod.fit_report(min_correl=0.25))
+    out  = mod.fit(y, pars, x=x)
+    print(out.fit_report(min_correl=0.25))
 
 which gives::
 
+    [[Model]]
+        voigt
     [[Fit Statistics]]
-        # function evals   = 32
+        # function evals   = 21
         # data points      = 401
         # variables        = 4
         chi-square         = 10.930
         reduced chi-square = 0.028
     [[Variables]]
-        amplitude:     34.19147 +/- 0.1794683 (0.52%) initial =  21.54192
-        center:        9.243748 +/- 0.00441902 (0.05%) initial =  9.25
-        fwhm:          3.223856 +/- 0.05097446 (1.58%) == '3.6013100*sigma'
-        gamma:         0.5254013 +/- 0.01857953 (3.54%) initial =  0.7
-        sigma:         0.8951898 +/- 0.01415442 (1.58%) initial =  1.35
+        amplitude:   34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+        center:      9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
+        fwhm:        3.22385491 +/- 0.050974 (1.58%)  == '3.6013100*sigma'
+        gamma:       0.52540157 +/- 0.018579 (3.54%) (init= 0.7)
+        sigma:       0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
     [[Correlations]] (unreported correlations are <  0.250)
         C(amplitude, gamma)          =  0.821
 
-and the fit shown above (on the right).
+
+and the fit shown on the right above.
 
 Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
-is definitely better with a separately varying ``gamma`` parameter.  In
+is definitely improved with a separately varying ``gamma`` parameter.  In
 addition, the two values for ``gamma`` and ``sigma`` differ significantly
 -- well outside the estimated uncertainties.  Even more compelling, reduced
 :math:`\chi^2` is improved even though a fourth variable has been added to
-the fit, justifying it as a significant variable in the model.
+the fit.  In the simplest statistical sense, this suggests that ``gamma``
+is a significant variable in the model.
 
 
 This example shows how easy it can be to alter and compare fitting models
-for simple problems.
+for simple problems.  The example is included in the ``doc_peakmodels.py``
+file in the examples directory.
+
 
 
 Example 2: Fit data to a Composite Model with pre-defined models
 ------------------------------------------------------------------
 
-
 Here, we repeat the point made at the end of the last chapter that instances
 of :class:`Model` class can be added them together to make a *composite
 model*.  But using the large number of built-in models available, this is
@@ -623,26 +650,34 @@ constant:
 .. literalinclude:: ../examples/doc_stepmodel.py
 
 After constructing step-like data, we first create a :class:`StepModel`
-telling it to use the ``erf`` form (see details below), and a
+telling it to use the ``erf`` form (see details above), and a
 :class:`ConstantModel`.  We set initial values, in one case using the data
-and :meth:`guess_starting_values` method, and using the explicit
-:meth:`set_paramval` for the initial constant value.    Making a composite
-model, we run :meth:`fit` and report the results, which give::
+and :meth:`guess` method for the initial step function parameters, and
+:meth:`make_params` arguments for the linear component.
+After making a composite model, we run :meth:`fit` and report the
+results, which give::
+
 
+    [[Model]]
+     Composite Model:
+        step(prefix='step_',form='erf')
+        linear(prefix='line_')
     [[Fit Statistics]]
-        # function evals   = 52
+        # function evals   = 49
         # data points      = 201
-        # variables        = 4
-        chi-square         = 600.191
-        reduced chi-square = 3.047
+        # variables        = 5
+        chi-square         = 633.465
+        reduced chi-square = 3.232
     [[Variables]]
-        amplitude:     111.1106 +/- 0.3122441 (0.28%) initial =  115.3431
-        c:             11.31151 +/- 0.2631688 (2.33%) initial =  9.278188
-        center:        3.122191 +/- 0.00506929 (0.16%) initial =  5
-        sigma:         0.6637199 +/- 0.009799607 (1.48%) initial =  1.428571
+        line_intercept:   11.5685248 +/- 0.285611 (2.47%) (init= 10.72406)
+        line_slope:       2.03270159 +/- 0.096041 (4.72%) (init= 0)
+        step_amplitude:   112.270535 +/- 0.674790 (0.60%) (init= 136.3006)
+        step_center:      3.12343845 +/- 0.005370 (0.17%) (init= 2.5)
+        step_sigma:       0.67468813 +/- 0.011336 (1.68%) (init= 1.428571)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(c, center)                 =  0.381
-        C(amplitude, sigma)          =  0.381
+        C(step_amplitude, step_sigma)  =  0.564
+        C(line_intercept, step_center)  =  0.428
+        C(step_amplitude, step_center)  =  0.109
 
 with a plot of
 
@@ -669,52 +704,49 @@ involving a decaying exponential and two gaussians.
 
 where we give a separate prefix to each model (they all have an
 ``amplitude`` parameter).  The ``prefix`` values are attached transparently
-to the models.  Note that the calls to :meth:`set_paramval` used the bare
-name, without the prefix.   We could have used them, but because we used
-the individual model ``gauss1`` and ``gauss2``, there was no need.  Had we
-used the composite model to set the initial parameter values, we would have
-needed to, as with::
+to the models.
 
-    ## WRONG
-    mod.set_paramval('amplitude', 500, min=10)
+Note that the calls to :meth:`make_params` used the bare
+name, without the prefix.  We could have used them, but because we used the
+individual model ``gauss1`` and ``gauss2``, there was no need.
 
-    ## Raises KeyError: "'amplitude' not a parameter name"
-
-    ## Correct
-    mod.set_paramval('g1_amplitude', 501, min=10)
 
+Note also in the example here that we explicitly set bounds on many of the
+parameter values.
 
 The fit results printed out are::
 
+    [[Model]]
+     Composite Model:
+        gaussian(prefix='g1_')
+        gaussian(prefix='g2_')
+        exponential(prefix='exp_')
     [[Fit Statistics]]
-        # function evals   = 66
+        # function evals   = 55
         # data points      = 250
         # variables        = 8
         chi-square         = 1247.528
         reduced chi-square = 5.155
     [[Variables]]
-        exp_amplitude:     99.01833 +/- 0.5374884 (0.54%) initial =  162.2102
-        exp_decay:         90.95088 +/- 1.103105 (1.21%) initial =  93.24905
-        g1_amplitude:      4257.774 +/- 42.38366 (1.00%) initial =  500
-        g1_center:         107.031 +/- 0.1500691 (0.14%) initial =  105
-        g1_fwhm:           39.26092 +/- 0.3779083 (0.96%) == '2.354820*g1_sigma'
-        g1_sigma:          16.67258 +/- 0.1604829 (0.96%) initial =  12
-        g2_amplitude:      2493.417 +/- 36.16923 (1.45%) initial =  500
-        g2_center:         153.2701 +/- 0.194667 (0.13%) initial =  150
-        g2_fwhm:           32.51287 +/- 0.4398624 (1.35%) == '2.354820*g2_sigma'
-        g2_sigma:          13.80695 +/- 0.1867924 (1.35%) initial =  12
-    [[Correlations]] (unreported correlations are <  0.100)
+        exp_amplitude:   99.0183291 +/- 0.537487 (0.54%) (init= 162.2102)
+        exp_decay:       90.9508788 +/- 1.103104 (1.21%) (init= 93.24905)
+        g1_amplitude:    4257.77384 +/- 42.38354 (1.00%) (init= 2000)
+        g1_center:       107.030955 +/- 0.150068 (0.14%) (init= 105)
+        g1_fwhm:         39.2609205 +/- 0.377907 (0.96%)  == '2.3548200*g1_sigma'
+        g1_sigma:        16.6725781 +/- 0.160482 (0.96%) (init= 15)
+        g2_amplitude:    2493.41747 +/- 36.16907 (1.45%) (init= 2000)
+        g2_center:       153.270103 +/- 0.194665 (0.13%) (init= 155)
+        g2_fwhm:         32.5128760 +/- 0.439860 (1.35%)  == '2.3548200*g2_sigma'
+        g2_sigma:        13.8069474 +/- 0.186791 (1.35%) (init= 15)
+    [[Correlations]] (unreported correlations are <  0.500)
         C(g1_amplitude, g1_sigma)    =  0.824
         C(g2_amplitude, g2_sigma)    =  0.815
         C(g1_sigma, g2_center)       =  0.684
         C(g1_amplitude, g2_center)   =  0.648
         C(g1_center, g2_center)      =  0.621
         C(g1_center, g1_sigma)       =  0.507
-        C(g1_amplitude, g1_center)   =  0.418
-        C(exp_amplitude, g2_amplitude)  =  0.282
-        C(exp_amplitude, g2_sigma)   =  0.171
-        C(exp_amplitude, g1_amplitude)  =  0.148
-        C(exp_decay, g1_center)      =  0.105
+
+
 
 We get a very good fit to this challenging problem (described at the NIST
 site as of average difficulty, but the tests there are generally hard) by
@@ -732,9 +764,12 @@ on the parameter values.  This fit is shown on the left:
 
 
 One final point on setting initial values.  From looking at the data
-itself, we can see the two Gaussian peaks are reasonably well centered.  We
-can simplify the initial parameter values by using this, and by defining an
-:func:`index_of` function to limit the data range.  That is, with::
+itself, we can see the two Gaussian peaks are reasonably well separated but
+do overlap. Furthermore, we can tell that the initial guess for the
+decaying exponential component was poorly estimated because we used the
+full data range.  We can simplify the initial parameter values by using
+this, and by defining an :func:`index_of` function to limit the data range.
+That is, with::
 
     def index_of(arrval, value):
         "return index of array *at or below* value "
@@ -745,13 +780,19 @@ can simplify the initial parameter values by using this, and by defining an
     ix2 = index_of(x, 135)
     ix3 = index_of(x, 175)
 
-    exp_mod.guess_starting_values(y[:ix1], x=x[:ix1])
-    gauss1.guess_starting_values(y[ix1:ix2], x=x[ix1:ix2])
-    gauss2.guess_starting_values(y[ix2:ix3], x=x[ix2:ix3])
+    exp_mod.guess(y[:ix1], x=x[:ix1])
+    gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
+    gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
 
 we can get a better initial estimate, and the fit converges in fewer steps,
-and without any bounds on parameters::
-
+getting to identical values (to the precision printed out in the report),
+and without any bounds on parameters at all::
+
+    [[Model]]
+     Composite Model:
+        gaussian(prefix='g1_')
+        gaussian(prefix='g2_')
+        exponential(prefix='exp_')
     [[Fit Statistics]]
         # function evals   = 46
         # data points      = 250
@@ -759,16 +800,16 @@ and without any bounds on parameters::
         chi-square         = 1247.528
         reduced chi-square = 5.155
     [[Variables]]
-        exp_amplitude:     99.01833 +/- 0.5374875 (0.54%) initial =  94.53724
-        exp_decay:         90.95089 +/- 1.103105 (1.21%) initial =  111.1985
-        g1_amplitude:      4257.773 +/- 42.38338 (1.00%) initial =  2126.432
-        g1_center:         107.031 +/- 0.1500679 (0.14%) initial =  106.5
-        g1_fwhm:           39.26091 +/- 0.3779053 (0.96%) == '2.354820*g1_sigma'
-        g1_sigma:          16.67258 +/- 0.1604816 (0.96%) initial =  14.5
-        g2_amplitude:      2493.418 +/- 36.16948 (1.45%) initial =  1878.892
-        g2_center:         153.2701 +/- 0.1946675 (0.13%) initial =  150
-        g2_fwhm:           32.51288 +/- 0.4398666 (1.35%) == '2.354820*g2_sigma'
-        g2_sigma:          13.80695 +/- 0.1867942 (1.35%) initial =  15
+        exp_amplitude:   99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+        exp_decay:       90.9508863 +/- 1.103105 (1.21%) (init= 111.1985)
+        g1_amplitude:    4257.77321 +/- 42.38338 (1.00%) (init= 2126.432)
+        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+        g1_fwhm:         39.2609141 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
+        g1_sigma:        16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+        g2_amplitude:    2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
+        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 150)
+        g2_fwhm:         32.5128777 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
+        g2_sigma:        13.8069481 +/- 0.186794 (1.35%) (init= 15)
     [[Correlations]] (unreported correlations are <  0.500)
         C(g1_amplitude, g1_sigma)    =  0.824
         C(g2_amplitude, g2_sigma)    =  0.815
@@ -778,5 +819,9 @@ and without any bounds on parameters::
         C(g1_center, g1_sigma)       =  0.507
 
 
+
 This example is in the file ``doc_nistgauss2.py`` in the examples folder,
-and the fit result shown on the right above.
+and the fit result shown on the right above shows an improved initial
+estimate of the data.
+
+
diff --git a/doc/conf.py b/doc/conf.py
index 451458e..63e0dfa 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -22,12 +22,7 @@ sys.path.append(os.path.abspath(os.path.join('.')))
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.mathjax',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
+from extensions import extensions
 
 try:
     import IPython.sphinxext.ipython_directive
@@ -38,8 +33,9 @@ except ImportError:
 
 
 intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
-                       'numpy': ('http://scipy.org/docs/numpy/', None),
-                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
+                       'numpy': ('http://docs.scipy.org/doc/numpy/', None),
+                       'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
+                       }
 
 intersphinx_cache_limit = 10
 
@@ -150,8 +146,8 @@ html_use_smartypants = True
 # Custom sidebar templates, maps document names to template names.
 html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
 
-html_use_modindex = False
-#html_use_index = True
+html_domain_indices = False
+html_use_index = True
 #html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 75bcae3..6c0cdab 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -1,12 +1,12 @@
 Calculation of confidence intervals
 ====================================
 
-.. py:module:: confidence
+.. module:: confidence
 
-Since version `0.5`, lmfit is also capable of calculating the confidence
-intervals directly. For most models, it is not necessary: the estimation
-of the standard error from the estimated covariance matrix is normally quite
-good.
+The lmfit :mod:`confidence` module allows you to explicitly calculate
+confidence intervals for variable parameters.  For most models, it is not
+necessary: the estimation of the standard error from the estimated
+covariance matrix is normally quite good.
 
 But for some models, e.g. a sum of two exponentials, the approximation
 begins to fail. For this case, lmfit has the function :func:`conf_interval`
@@ -17,6 +17,7 @@ are more robust.
 
 Method used for calculating confidence intervals
 -------------------------------------------------
+
 The F-test is used to compare our null model, which is the best fit we have
 found, with an alternate model, where one of the parameters is fixed to a
 specific value. The value is changed until the difference between :math:`\chi^2_0`
@@ -32,77 +33,93 @@ N is the number of data-points, P the number of parameter of the null model.
 difference of number of parameters between our null model and the alternate
 model).
 
-A log-likelihood method will be added soon.
+Adding a log-likelihood method is under consideration.
 
 A basic example
 ---------------
 
-First we create a toy problem::
-
+First we create an example problem::
 
     >>> import lmfit
     >>> import numpy as np
     >>> x = np.linspace(0.3,10,100)
     >>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
     >>> p = lmfit.Parameters()
-    >>> p.add_many(('a',0.1),('b',1))
+    >>> p.add_many(('a', 0.1), ('b', 1))
     >>> def residual(p):
     ...    a = p['a'].value
     ...    b = p['b'].value
     ...    return 1/(a*x)+b-y
 
 
-We have to fit it, before we can generate the confidence intervals::
+before we can generate the confidence intervals, we have to run a fit, so
+that the automated estimate of the standard errors can be used as a
+starting point::
 
 
     >>> mi = lmfit.minimize(residual, p)
-    >>> mi.leastsq()
     >>> lmfit.printfuncs.report_fit(mi.params)
-    [[Variables]]
-         a:     0.09978076 +/- 0.0002112132 (0.21%) initial =  0.09978076
-         b:     1.992907 +/- 0.0132743 (0.67%) initial =  1.992907
+    [[Variables]]
+        a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+        b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
         C(a, b)                      =  0.601
 
-
-Now it just a simple function call to start the calculation::
+Now it is just a simple function call to calculate the confidence
+intervals::
 
     >>> ci = lmfit.conf_interval(mi)
     >>> lmfit.printfuncs.report_ci(ci)
          99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
-    a   0.09960   0.09981   0.10000   0.10019   0.10039   0.10058   0.10079
-    b   1.97035   1.98326   1.99544   2.00008   2.01936   2.03154   2.04445
-
-
-As we can see, the estimated error is almost the same, and the
-uncertainties are well behaved: Going from 1 :math:`\sigma` (68%
-confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is fairly
-linear.  For this problem, it is not necessary to calculate confidence
-intervals, and the estimates of the uncertainties from the covariance
-matrix are sufficient.
+    a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
+    b   1.94751   1.96049   1.97274   1.97741   1.99680   2.00905   2.02203
+
+This shows the best-fit values for the parameters in the `0.00%` column,
+and parameter values that are at the varying confidence levels given by
+steps in :math:`\sigma`.  As we can see, the estimated error is almost the
+same, and the uncertainties are well behaved: Going from 1 :math:`\sigma`
+(68% confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is
+fairly linear.  It can also be seen that the errors are fairly symmetric
+around the best fit value.  For this problem, it is not necessary to
+calculate confidence intervals, and the estimates of the uncertainties from
+the covariance matrix are sufficient.
 
 An advanced example
 -------------------
 
 Now we look at a problem where calculating the error from approximated
-covariance can lead to misleading results::
+covariance can lead to misleading results -- two decaying exponentials.  In
+fact such a problem is particularly hard for the Levenberg-Marquardt
+method, so we first estimate the results using the slower but robust
+Nelder-Mead  method, and *then* use Levenberg-Marquardt to estimate the
+uncertainties and correlations::
+
 
-    >>> y = 3*np.exp(-x/2.)-5*np.exp(-x/10.)+0.2*np.random.randn(x.size)
+    >>> x = np.linspace(1, 10, 250)
+    >>> np.random.seed(0)
+    >>> y = 3.0*np.exp(-x/2) -5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(len(x))
+    >>>
     >>> p = lmfit.Parameters()
-    >>> p.add_many(('a1', 5), ('a2', -5), ('t1', 2), ('t2', 5))
+    >>> p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))
+    >>>
     >>> def residual(p):
-    ...    a1, a2, t1, t2 = [i.value for i in p.values()]
-    ...    return a1*np.exp(-x/t1)+a2*np.exp(-x/t2)-y
-
+    ...    v = p.valuesdict()
+    ...    return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1)/v['t2'])-y
+    >>>
+    >>> # first solve with Nelder-Mead
+    >>> mi = lmfit.minimize(residual, p, method='Nelder')
+    >>> # then solve with Levenberg-Marquardt
     >>> mi = lmfit.minimize(residual, p)
-    >>> mi.leastsq()
-    >>> lmfit.printfuncs.report_fit(mi.params, show_correl=False)
+    >>>
+    >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
 
     [[Variables]]
-         a1:     2.611013 +/- 0.3279648 (12.56%) initial =  2.611013
-         a2:    -4.512928 +/- 0.3991997 (8.85%) initial = -4.512928
-         t1:     1.569477 +/- 0.3345078 (21.31%) initial =  1.569477
-         t2:     10.96137 +/- 1.263874 (11.53%) initial =  10.96137
+        a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+        a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+        t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+        t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+    [[Correlations]] (unreported correlations are <  0.500)
+        C(a2, t2)                    =  0.987
 
 
 Again we call :func:`conf_interval`, this time with tracing and only for 1-
@@ -110,17 +127,19 @@ and 2 :math:`\sigma`::
 
     >>> ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
     >>> lmfit.printfuncs.report_ci(ci)
+
           95.00%    68.00%     0.00%    68.00%    95.00%
-    a1   2.11679   2.33696   2.61101   3.06631   4.28694
-    a2  -6.39449  -5.05982  -4.20173  -4.19528  -3.97850
-    t2   8.00414   9.62688  12.17331  12.17886  13.34857
-    t1   1.07009   1.28482   1.37407   1.97509   2.64341
+    a1   2.71850   2.84525   2.98622   3.14874   3.34076
+    a2  -4.63180  -4.46663  -4.35429  -4.22883  -4.14178
+    t2  10.82699  11.33865  11.78219  12.28195  12.71094
+    t1   1.08014   1.18566   1.38044   1.45566   1.62579
+
 
 Comparing these two different estimates, we see that the estimate for `a1`
-is reasonable well approximated from the covariance matrix, but the
-estimates for `a2`, `t1`, and `t2` are very asymmetric and that going from
-1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma` (95% confidence) is
-not very predictable.
+is reasonably well approximated from the covariance matrix, but the
+estimates for `a2` and especially for `t1`, and `t2` are very asymmetric
+and that going from 1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma`
+(95% confidence) is not very predictable.
 
 Now let's plot a confidence region::
 
@@ -147,7 +166,11 @@ which shows the figure on the left below for ``a1`` and ``t2``, and for
 Neither of these plots is very much like an ellipse, which is implicitly
 assumed by the approach using the covariance matrix.
 
-Remember the trace? It shows also shows the dependence between two parameters::
+The trace returned as the optional second argument from
+:func:`conf_interval` contains a dictionary for each variable parameter.
+The values are dictionaries with arrays of values for each variable, and an
+array of corresponding cumulative probabilities.  This
+can be used to show the dependence between two parameters::
 
     >>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
     >>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
diff --git a/doc/constraints.rst b/doc/constraints.rst
index ee5ca88..7d06b8d 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -6,14 +6,32 @@ Using Mathematical Constraints
 
 .. _asteval: http://newville.github.io/asteval/
 
-While being able to fix variables and place upper and lower bounds on their
-values are key parts of lmfit, the ability to place mathematical
-constraints on parameters is also highly desirable.  This section describes
-how to do this, and what sort of parameterizations are possible -- see
-the `asteval`_ for further documentation.
+Being able to fix variables to a constant value or place upper and lower
+bounds on their values can greatly simplify modeling real data.  These 
+capabilities are key to lmfit's Parameters.  In addition, it is sometimes 
+highly desirable to place mathematical constraints on parameter values.  
+For example, one might want to require that two Gaussian peaks have the 
+same width, or have amplitudes that are constrained to add to some value.  
+Of course, one could rewrite the objective or model function to place such 
+requirements, but this is somewhat error prone, and limits the flexibility 
+so that exploring constraints becomes laborious.
+
+To simplify the setting of constraints, Parameters can be assigned a 
+mathematical expression of other Parameters, builtin constants, and builtin 
+mathematical functions that will be used to determine its value.  The 
+expressions used for constraints are evaluated using the `asteval`_ module, 
+which uses Python syntax, and evaluates the constraint expressions in a safe 
+and isolated  namespace.
+
+This approach to mathematical constraints allows one to not have to write a 
+separate model function for two Gaussians where the two ``sigma`` values are 
+forced to be equal, or where amplitudes are related.  Instead, one can write a
+more general two Gaussian model (perhaps using :class:`GaussianModel`) and 
+impose such constraints on the Parameters for a particular fit. 
+
 
 Overview
-===========
+===============
 
 Just as one can place bounds on a Parameter, or keep it fixed during the
 fit, so too can one place mathematical constraints on parameters.  The way
@@ -136,13 +154,13 @@ The `asteval`_ interpreter uses a flat namespace, implemented as a single
 dictionary. That means you can preload any Python symbol into the namespace
 for the constraints::
 
-    def lorentzian(x, amp, cen, wid):
+    def mylorentzian(x, amp, cen, wid):
         "lorentzian function: wid = half-width at half-max"
         return (amp  / (1 + ((x-cen)/wid)**2))
 
     fitter = Minimizer()
-    fitter.asteval.symtable['lorenztian'] = lorenztian
+    fitter.asteval.symtable['lorentzian'] = mylorentzian
 
-and this :meth:`lorenztian` function can now be used in constraint
+and this :meth:`lorentzian` function can now be used in constraint
 expressions.
 
diff --git a/doc/extensions.py b/doc/extensions.py
new file mode 100644
index 0000000..40de659
--- /dev/null
+++ b/doc/extensions.py
@@ -0,0 +1,10 @@
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/fitting.rst b/doc/fitting.rst
index eb56637..6bd8aa5 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -4,30 +4,31 @@
 Performing Fits, Analyzing Outputs
 =======================================
 
-As shown in the introduction, a simple fit can be performed with
-the :func:`minimize` function.    For more sophisticated modeling,
-the :class:`Minimizer` class can be used to gain a bit more control,
-especially when using complicated constraints.
+As shown in the previous chapter, a simple fit can be performed with the
+:func:`minimize` function.  For more sophisticated modeling, the
+:class:`Minimizer` class can be used to gain a bit more control, especially
+when using complicated constraints.
 
 
 The :func:`minimize` function
 ===============================
 
-The minimize function takes a function to minimize, a dictionary of
-:class:`Parameter` , and several optional arguments.    See
-:ref:`fit-func-label` for details on writing the function to minimize.
+The minimize function takes an objective function (the function that
+calculates the array to be minimized), a :class:`Parameters` ordered
+dictionary, and several optional arguments.  See :ref:`fit-func-label` for
+details on writing the function to minimize.
 
 .. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **leastsq_kws]]]]]])
 
-   find values for the params so that the sum-of-squares of the returned array
-   from function is minimized.
+   find values for the ``params`` so that the sum-of-squares of the array returned
+   from ``function`` is minimized.
 
    :param function:  function to return fit residual.  See :ref:`fit-func-label` for details.
    :type  function:  callable.
-   :param params:  a dictionary of Parameters.  Keywords must be strings
+   :param params:  a :class:`Parameters` dictionary.  Keywords must be strings
                    that match ``[a-z_][a-z0-9_]*`` and is not a python
                    reserved word.  Each value must be :class:`Parameter`.
-   :type  params:  dict
+   :type  params:  dict or :class:`Parameters`.
    :param args:  arguments tuple to pass to the residual function as  positional arguments.
    :type  args:  tuple
    :param kws:   dictionary to pass to the residual function as keyword arguments.
@@ -48,15 +49,20 @@ The minimize function takes a function to minimize, a dictionary of
    appropriate, estimated uncertainties and correlations.  See
    :ref:`fit-results-label` for further details.
 
+   If provided, the ``iter_cb`` function should take arguments of ``params,
+   iter, resid, *args, **kws``, where ``params`` will have the current
+   parameter values, ``iter`` the iteration, ``resid`` the current residual
+   array, and ``*args`` and ``**kws`` as passed to the objective function.
+
 ..  _fit-func-label:
 
 Writing a Fitting Function
 ===============================
 
-An important component of a fit is writing a function to be minimized in
-the least-squares sense.   Since this function will be called by other
+An important component of a fit is writing a function to be minimized --
+the *objective function*.  Since this function will be called by other
 routines, there are fairly stringent requirements for its call signature
-and return value.   In principle, your function can be any python callable,
+and return value.  In principle, your function can be any python callable,
 but it must look like this:
 
 .. function:: func(params, *args, **kws):
@@ -85,16 +91,17 @@ method, effectively doing a least-squares optimization of the return values.
 
 
 Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
-to unpack these to get numerical values at the top of the function.  A simple example
-would look like::
+to unpack these to get numerical values at the top of the function.  A
+simple way to do this is with :meth:`Parameters.valuesdict`, as with::
+
 
     def residual(pars, x, data=None, eps=None):
         # unpack parameters:
         #  extract .value attribute for each parameter
-        amp = pars['amp'].value
-        period = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        parvals = pars.valuesdict()
+        period = parvals['period']
+        shift = parvals['shift']
+        decay = parvals['decay']
 
         if abs(shift) > pi/2:
             shift = shift - sign(shift)*pi
@@ -102,7 +109,7 @@ would look like::
         if abs(period) < 1.e-10:
             period = sign(period)*1.e-10
 
-        model = amp * sin(shift + x/period) * exp(-x*x*decay*decay)
+        model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
 
         if data is None:
             return model
@@ -110,20 +117,23 @@ would look like::
             return (model - data)
         return (model - data)/eps
 
-In this example, ``x`` is a positional (required) argument, while the ``data``
-array is actually optional (so that the function returns the model calculation
-if the data is neglected).   Also note that the model calculation will divide
-``x`` by the varied value of the 'period' Parameter.  It might be wise to
-make sure this parameter cannot be 0.   It would be possible to use the bounds
-on the :class:`Parameter` to do this::
+In this example, ``x`` is a positional (required) argument, while the
+``data`` array is actually optional (so that the function returns the model
+calculation if the data is neglected).  Also note that the model
+calculation will divide ``x`` by the value of the 'period' Parameter.  It
+might be wise to ensure this parameter cannot be 0.  It would be possible
+to use the bounds on the :class:`Parameter` to do this::
 
     params['period'] = Parameter(value=2, min=1.e-10)
 
-but might be wiser to put this directly in the function with::
+but putting this directly in the function with::
 
         if abs(period) < 1.e-10:
             period = sign(period)*1.e-10
 
+is also a reasonable approach.   Similarly, one could place bounds on the
+``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.
+
 ..  _fit-methods-label:
 
 Choosing Different Fitting Methods
@@ -137,16 +147,10 @@ being fast, and well-behaved for most curve-fitting needs, and making it
 easy to estimate uncertainties for and correlations between pairs of fit
 variables, as discussed in :ref:`fit-results-label`.
 
-Alternative algorithms can also be used. These include `simulated annealing
-<http://en.wikipedia.org/wiki/Simulated_annealing>`_ which promises a
-better ability to avoid local minima, and `BFGS
-<http://en.wikipedia.org/wiki/Limited-memory_BFGS>`_, which is a
-modification of the quasi-Newton method.
-
-To select which of these algorithms to use, use the ``method`` keyword to the
-:func:`minimize` function or use the corresponding method name from the
-:class:`Minimizer` class as listed in the
-:ref:`Table of Supported Fitting Methods <fit-methods-table>`.
+Alternative algorithms can also be used by providing the ``method`` keyword
+to the :func:`minimize` function or use the corresponding method name from
+the :class:`Minimizer` class as listed in the :ref:`Table of Supported
+Fitting Methods <fit-methods-table>`.
 
 .. _fit-methods-table:
 
@@ -162,8 +166,6 @@ To select which of these algorithms to use, use the ``method`` keyword to the
  +-----------------------+--------------------+---------------------+-------------------------+
  | L-BFGS-B              |  ``lbfgsb``        | :meth:`lbfgsb`      | ``L-BFGS-B``            |
  +-----------------------+--------------------+---------------------+-------------------------+
- | Simulated Annealing   |  ``anneal``        | :meth:`anneal`      | ``Anneal``              |
- +-----------------------+--------------------+---------------------+-------------------------+
  | Powell                |  ``powell``        |                     | ``Powell``              |
  +-----------------------+--------------------+---------------------+-------------------------+
  | Conjugate Gradient    |  ``cg``            |                     | ``CG``                  |
@@ -172,16 +174,20 @@ To select which of these algorithms to use, use the ``method`` keyword to the
  +-----------------------+--------------------+---------------------+-------------------------+
  | COBYLA                |  ``cobyla``        |                     |  ``COBYLA``             |
  +-----------------------+--------------------+---------------------+-------------------------+
+ | COBYLA                |  ``cobyla``        |                     |  ``COBYLA``             |
+ +-----------------------+--------------------+---------------------+-------------------------+
+ | Truncated Newton      |  ``tnc``           |                     |  ``TNC``                |
+ +-----------------------+--------------------+---------------------+-------------------------+
 + | Trust Newton-CG       |  ``trust-ncg``     |                     |  ``trust-ncg``          |
+ +-----------------------+--------------------+---------------------+-------------------------+
+ | Dogleg                |  ``dogleg``        |                     |  ``dogleg``             |
+ +-----------------------+--------------------+---------------------+-------------------------+
  | Sequential Linear     |  ``slsqp``         |                     |  ``SLSQP``              |
  | Squares Programming   |                    |                     |                         |
  +-----------------------+--------------------+---------------------+-------------------------+
 
 .. note::
 
-   Use of :meth:`scipy.optimize.minimize` requires scipy 0.11 or higher.
-
-.. note::
-
    The objective function for the Levenberg-Marquardt method **must**
    return an array, with more elements than variables.  All other methods
    can return either a scalar value or an array.
@@ -189,14 +195,11 @@ To select which of these algorithms to use, use the ``method`` keyword to the
 
 .. warning::
 
-  The Levenberg-Marquardt method is *by far* the most tested fit method,
-  and much of this documentation assumes that this is the method used.  For
-  example, many of the fit statistics and estimates for uncertainties in
-  parameters discussed in :ref:`fit-results-label` are done only for the
-  ``leastsq`` method.
+  Much of this documentation assumes that the Levenberg-Marquardt method is
+  the method used.  Many of the fit statistics and estimates for
+  uncertainties in parameters discussed in :ref:`fit-results-label` are
+  done only for this method.
 
-In particular, the simulated annealing method appears to not work
-correctly.... understanding this is on the ToDo list.
 
 ..  _fit-results-label:
 
@@ -273,6 +276,7 @@ near the maximum or minimum value makes the covariance matrix singular.  In
 these cases, the :attr:`errorbars` attribute of the fit result
 (:class:`Minimizer` object) will be ``False``.
 
+.. module:: Minimizer
 
 ..  _fit-minimizer-label:
 
@@ -426,53 +430,21 @@ Getting and Printing Fit Reports
 
    print text of report from :func:`fit_report`.
 
-An example fit with an error report::
-
-    p_true = Parameters()
-    p_true.add('amp', value=14.0)
-    p_true.add('period', value=5.33)
-    p_true.add('shift', value=0.123)
-    p_true.add('decay', value=0.010)
-
-    def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
-
-        if abs(shift) > pi/2:
-            shift = shift - sign(shift)*pi
-        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
-        if data is None:
-            return model
-        return (model - data)
-
-    n = 2500
-    xmin = 0.
-    xmax = 250.0
-    noise = random.normal(scale=0.7215, size=n)
-    x     = linspace(xmin, xmax, n)
-    data  = residual(p_true, x) + noise
 
-    fit_params = Parameters()
-    fit_params.add('amp', value=13.0)
-    fit_params.add('period', value=2)
-    fit_params.add('shift', value=0.0)
-    fit_params.add('decay', value=0.02)
+An example fit with report would be
 
-    out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+.. literalinclude:: ../examples/doc_withreport.py
 
-    fit = residual(fit_params, x)
-    report_errors(fit_params)
+which would write out::
 
-would generate this report::
 
     [[Variables]]
-         amp:        13.969724 +/- 0.050145 (0.36%) initial =  13.000000
-         decay:      0.009990 +/- 0.000042 (0.42%) initial =  0.020000
-         period:     5.331423 +/- 0.002788 (0.05%) initial =  2.000000
-         shift:      0.125333 +/- 0.004938 (3.94%) initial =  0.000000
+        amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
+        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
+        period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
+        shift:    0.16203677 +/- 0.014056 (8.67%) (init= 0)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(period, shift)             =  0.800
-        C(amp, decay)                =  0.576
+        C(period, shift)             =  0.797
+        C(amp, decay)                =  0.582
+
 
diff --git a/doc/index.rst b/doc/index.rst
index 0167344..9356c84 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -5,53 +5,42 @@ Non-Linear Least-Square Minimization and Curve-Fitting for Python
 
 .. _Levenberg-Marquardt:     http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
 .. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
-.. _Nelder-Mead:             http://en.wikipedia.org/wiki/Nelder-Mead_method
-
-The lmfit python package provides a simple and flexible interface to
-non-linear optimization and curve fitting problems.  Lmfit extends the
-optimization capabilities of :mod:`scipy.optimize`.  Initially designed to
-extend the the `Levenberg-Marquardt`_ algorithm in
-:func:`scipy.optimize.minimize.leastsq`, lmfit supports most of the
-optimization methods from :mod:`scipy.optimize`.  It also provides a simple
-way to apply this extension to *curve fitting* problems.
-
-The key concept in lmfit is that instead of using plain floating pointing
-values for the variables to be optimized (as all the optimization routines
-in :mod:`scipy.optimize` use), optimizations are done using
-:class:`Parameter` objects.  A :class:`Parameter` can have its value fixed
-or varied, have upper and/or lower bounds placed on its value, or have
-values that are evaluated from algebraic expressions of other Parameter
-values.  This is all done outside the optimization routine, so that these
-bounds and constraints can be applied to **all** optimization routines from
-:mod:`scipy.optimize`, and with a more Pythonic interface than any of the
-routines that do provide bounds.
-
-By using :class:`Parameter` objects instead of plain variables, the
-objective function does not have to be rewritten to reflect every change of
-what is varied in the fit, or if relationships or constraints are placed on
-the Parameters.  This simplifies the writing of models, and gives the user
-more flexibility in using and testing variations of that model.
-
-
-Lmfit supports several of the optimization methods from
-:mod:`scipy.optimize`.  The default, and by far best tested optimization
-method used (and the origin of the name) is the `Levenberg-Marquardt`_
-algorithm of :func:`scipy.optimize.leastsq` and
-:func:`scipy.optimize.curve_fit`.  Much of this document assumes this
-algorithm is used unless explicitly stated.  An important point for many
-scientific analysis is that this is only method that automatically
-estimates uncertainties and correlations between fitted variables from the
-covariance matrix calculated during the fit. Because the approach derived
-from `MINPACK-1`_ using the covariance matrix to determine uncertainties is
-sometimes questioned (and sometimes rightly so), lmfit supports methods to
-do a brute force search of the confidence intervals and correlations for
-sets of parameters.
+
+
+Lmfit provides a high-level interface to non-linear optimization and
+curve fitting problems for Python. Lmfit builds on
+`Levenberg-Marquardt`_ algorithm of :func:`scipy.optimize.leastsq`, but
+also supports most of the optimization methods from :mod:`scipy.optimize`.
+It has a number of useful enhancements, including:
+
+  * Using :class:`Parameter` objects instead of plain floats as variables.
+    A :class:`Parameter` has a value that can be varied in the fit, fixed,
+    have upper and/or lower bounds.  It can even have a value that is
+    constrained by an algebraic expression of other Parameter values.
+
+  * Ease of changing fitting algorithms.  Once a fitting model is set up,
+    one can change the fitting algorithm without changing the objective
+    function.
+
+  * Improved estimation of confidence intervals.  While
+    :func:`scipy.optimize.leastsq` will automatically calculate
+    uncertainties and correlations from the covariance matrix, lmfit also
+    has functions to explicitly explore parameter space to determine
+    confidence levels even for the most difficult cases.
+
+  * Improved curve-fitting with the :class:`Model` class.  This
+    extends the capabilities of :func:`scipy.optimize.curve_fit`, allowing
+    you to turn a function that models your data into a python class
+    that helps you parametrize and fit data with that model.
+
+  * Many :ref:`pre-built models <builtin_models_chapter>` for common
+    lineshapes are included and ready to use.
 
 .. _lmfit github repository:   http://github.com/lmfit/lmfit-py
 
-The lmfit package is an open-source project, and this document are a works
-in progress.  If you are interested in participating in this effort please
-use the `lmfit github repository`_.
+The lmfit package is Free software, using an MIT license.  The software and
+this document are works in progress.  If you are interested in
+participating in this effort please use the `lmfit github repository`_.
 
 
 .. toctree::
diff --git a/doc/installation.rst b/doc/installation.rst
index 2c5ed93..9a90056 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -2,25 +2,27 @@
 Downloading and Installation
 ====================================
 
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
+.. _Python Setup Tools:        http://pypi.python.org/pypi/setuptools
+.. _pip:  https://pip.pypa.io/
+.. _nose: http://nose.readthedocs.org/
+
 Prerequisites
 ~~~~~~~~~~~~~~~
 
 The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
 higher is recommended, but extensive testing on compatibility with various
-versions of scipy has not been done.  Lmfit does work with Python 2.7, 3.2,
-and 3.3.  No testing has been done with Python 3.4, but as the package is
-pure Python, relying only on scipy and numpy, no significant troubles are
-expected.  Nose is required for running the test suite, and IPython and
-matplotib are recommended.  If Pandas is available, it will be used in
-portions of lmfit.
+versions of scipy has not been done.  Lmfit does work with Python 2.7,
+3.2, and 3.3.  No testing has been done with Python 3.4, but as the package
+is pure Python, relying only on scipy and numpy, no significant troubles
+are expected.  The `nose`_ framework is required for running the test
+suite, and IPython and matplotlib are recommended.  If Pandas is available,
+it will be used in portions of lmfit.
 
 
 Downloads
 ~~~~~~~~~~~~~
 
-.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
-.. _Python Setup Tools:        http://pypi.python.org/pypi/setuptools
-.. _pip:  https://pip.pypa.io/
 
 The latest stable version of lmfit is  available from `PyPi <http://pypi.python.org/pypi/lmfit/>`_.
 
@@ -54,29 +56,43 @@ and install using::
    python setup.py install
 
 
+
+Testing
+~~~~~~~~~~
+
+A battery of tests scripts that can be run with the `nose`_ testing
+framework is distributed with lmfit in the ``tests`` folder.  These are
+routinely run on the development version.  Running ``nosetests`` should run
+all of these tests to completion without errors or failures.
+
+Many of the examples in this documentation are distributed with lmfit in
+the ``examples`` folder, and should also run for you.  Many of these
+require matplotlib for plotting.
+
 Acknowledgements
 ~~~~~~~~~~~~~~~~~~
 
 LMFIT was originally written by Matthew Newville.  Substantial code and
 documentation improvements, especially for improved estimates of confidence
-intervals was provided by Till Stensitzki.  The implementation of parameter
-bounds as described in the MINUIT documentation is taken from Jonathan
-J. Helmus' leastsqbound code, with permission.  The code for propagation of
-uncertainties is taken from Eric O. Le Bigot's uncertainties package, with
-permission.  Much of the work on improved unit testing and high-level model
-functions was done by Daniel B. Allen.  Many valuable suggestions for
-improvements have come from Christoph Deil.  The code obviously depends on,
-and owes a very large debt to the code in scipy.optimize.  Several
-discussions on the scipy mailing lists have also led to improvements in
-this code.
+intervals was provided by Till Stensitzki.  Much of the work on improved
+unit testing and high-level model functions was done by Daniel B. Allen,
+with substantial input from Antonino Ingargiola.  Many valuable suggestions
+for improvements have come from Christoph Deil.  The implementation of
+parameter bounds as described in the MINUIT documentation is taken from
+Jonathan J. Helmus' leastsqbound code, with permission.  The code for
+propagation of uncertainties is taken from Eric O. Le Bigot's uncertainties
+package, with permission.  The code obviously depends on, and owes a very
+large debt to the code in scipy.optimize.  Several discussions on the scipy
+mailing lists have also led to improvements in this code.
 
 License
 ~~~~~~~~~~~~~
 
 The LMFIT-py code is distributed under the following license:
 
-  Copyright (c) 2012 Matthew Newville, The University of Chicago
-                     Till Stensitzki, Freie Universitat Berlin
+  Copyright (c) 2014 Matthew Newville, The University of Chicago, Till
+  Stensitzki, Freie Universitat Berlin, Daniel B. Allen, Johns Hopkins
+  University, Antonino Ingargiola, University of California, Los Angeles
 
   Permission to use and redistribute the source code or binary forms of this
   software and its documentation, with or without modification is hereby
diff --git a/doc/model.rst b/doc/model.rst
index c741fb9..7ece86d 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -4,27 +4,32 @@
 Modeling Data and Curve Fitting
 =================================================
 
-A very common application of least-squares minimization is *curve fitting*,
-where one has a parametrized model function meant to explain some
-phenomena, and wants to adjust the numerical values for the model to
-most closely match some particular data.  Within the :mod:`scipy` world,
-such curve fitting problems are commonly solved with
-:func:`scipy.optimize.curve_fit`, which simply calls
-:func:`scipy.optimize.leastsq`.  As lmfit is a high-level wrapper around
-:func:`scipy.optimize.leastsq`, it can be used for curve-fitting problems,
-but here we discuss an even easier way to do it that is closer in spirit to
-:func:`scipy.optimize.curve_fit`, but better.
-
-The :class:`Model` class makes it easy to turn a model function that
-calculates a model for your data into a fitting model.  In an effort to
-make simple things truly simple, lmfit also provides canonical definitions
-for many known lineshapes such as Gaussian or Lorentzian peaks and
-Exponential decays that are widely used in many scientific domains.  These
-are available in the :mod:`models` module that will be discussed in more
-detail in the next chapter (:ref:`builtin_models_chapter`).  We mention it
-here as you may want to consult that list before writing your own model.
-For now, we focus on turning python function into high-level fitting models
-with the :class:`Model` class, and using these to fit data.
+
+A common use of least-squares minimization is *curve fitting*, where one
+has a parametrized model function meant to explain some phenomena and wants
+to adjust the numerical values for the model to most closely match some
+data.  With :mod:`scipy`, such problems are commonly solved with
+:func:`scipy.optimize.curve_fit`, which is a wrapper around
+:func:`scipy.optimize.leastsq`.  Since Lmfit's :func:`minimize` is also a
+high-level wrapper around :func:`scipy.optimize.leastsq` it can be used for
+curve-fitting problems, but requires more effort than using
+:func:`scipy.optimize.curve_fit`.
+
+Here we discuss lmfit's :class:`Model` class.  This takes a model function
+-- a function that calculates a model for some data -- and provides methods
+to create parameters for that model and to fit data using that model
+function.  This is closer in spirit to :func:`scipy.optimize.curve_fit`,
+but with the advantages of using :class:`Parameters` and lmfit.
+
+In addition to allowing you to turn any model function into a curve-fitting
+method, Lmfit also provides canonical definitions for many known lineshapes
+such as Gaussian or Lorentzian peaks and Exponential decays that are widely
+used in many scientific domains.  These are available in the :mod:`models`
+module that will be discussed in more detail in the next chapter
+(:ref:`builtin_models_chapter`).  We mention it here as you may want to
+consult that list before writing your own model.  For now, we focus on
+turning a python function into high-level fitting models with the
+:class:`Model` class, and using these to fit data.
 
 
 Example: Fit data to Gaussian profile
@@ -33,18 +38,17 @@ Example: Fit data to Gaussian profile
 Let's start with a simple and common example of fitting data to a Gaussian
 peak.  As we will see, there is a built-in :class:`GaussianModel` class that
 provides a model function for a Gaussian profile, but here we'll build our
-own.  We start with a definition the model function that we might want to
-use to fit to some data::
+own.  We start with a simple definition of the model function::
 
     >>> from numpy import sqrt, pi, exp, linspace
     >>>
     >>> def gaussian(x, amp, cen, wid):
-    ...    "1-d gaussian: gaussian(x, amp, cen, wid)"
-    ...    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+    ...    return amp * exp(-(x-cen)**2 /wid)
     ...
 
-To some data :math:`y(x)` represented by the arrays ``y`` and ``x`` with we
-would do something like::
+that we want to use to fit to some data :math:`y(x)` represented by the
+arrays ``y`` and ``x``.  Using :func:`scipy.optimize.curve_fit` makes this
+easy to do, allowing us to do something like::
 
     >>> from scipy.optimize import curve_fit
     >>>
@@ -56,61 +60,75 @@ would do something like::
 
 
 That is, we read in data from somewhere, make an initial guess of the model
-values, and run ``curve_fit`` with the model function, data arrays, and
-initial guesses.  The results returned are the optimal values for the
-parameters and the covariance matrix.   It's pretty simple to do, but
-misses many of the key benefits of lmfit.
+values, and run :func:`scipy.optimize.curve_fit` with the model function,
+data arrays, and initial guesses.  The results returned are the optimal
+values for the parameters and the covariance matrix.  It's simple and very
+useful.  But it misses the benefits of lmfit.
 
 
-To solve this with lmfit we could write a residual function but such a
-residual function would be fairly simple (essentially, ``data - model``,
+To solve this with lmfit we would have to write an objective function. But
+such a function would be fairly simple (essentially, ``data - model``,
 possibly with some weighting), and we would need to define and use
-appropriately named parameters.  Though convenient, it also becomes
-somewhat of a burden to keep all the parameter names straight.  After doing
-this a few times it appears as a recurring pattern, and we can imagine
-automating this process.  That's where the :class:`Model` class comes in.
-We can pass this class the ``gaussian`` function, and it will automatically
-generate the appropriate residual function and the corresponding parameters
-from the function signature itself::
+appropriately named parameters.  Though convenient, it is somewhat of a
+burden to keep the parameter names straight (on the other hand, with
+:func:`scipy.optimize.curve_fit` you are required to remember the parameter
+order).  After doing this a few times it appears as a recurring pattern,
+and we can imagine automating this process.  That's where the
+:class:`Model` class comes in.
+
+The :class:`Model` allows us to easily wrap a model function such as the
+``gaussian`` function.  This automatically generates the appropriate
+residual function, and determines the corresponding parameter names from
+the function signature itself::
 
     >>> from lmfit import Model
     >>> gmod = Model(gaussian)
-    >>> for name, par in gmod.params.items():
-    ...     print(name, par)
-    ...
-    'amp', <Parameter 'amp', None, bounds=[None:None]>
-    'wid', <Parameter 'wid', None, bounds=[None:None]>
-    'cen', <Parameter 'cen', None, bounds=[None:None]>
-    >>> print("Independent Variables: ", gmod.independent_vars)
-    'Independent Variables: ', ['x']
-
-The Model ``gmod`` is constructed to have a ``params`` member that holds the
-:class:`Parameters` for the model, and an ``independent_vars`` that holds
-the name of the independent variables.  By default, the first argument of
-the function is taken as the independent variable, and the rest of the
-parameters are used for variable Parameters.  Thus, for the ``gaussian``
+    >>> gmod.param_names
+    set(['amp', 'wid', 'cen'])
+    >>> gmod.independent_vars
+    ['x']
+
+The Model ``gmod`` knows the names of the parameters and the independent
+variables.  By default, the first argument of the function is taken as the
+independent variable, held in :attr:`independent_vars`, and the rest of the
+function's positional arguments (and, in certain cases, keyword arguments --
+see below) are used for Parameter names.  Thus, for the ``gaussian``
 function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
-``x`` is the independent variable -- taken directly from the signature of
-the model function.
+``x`` is the independent variable -- all taken directly from the signature
+of the model function. As we will see below, you can specify what the
+independent variable is, and you can add or alter parameters too.
+
+On creation of the model, parameters are *not* created.  The model knows
+what the parameters should be named, but not anything about the scale and
+range of your data.  You will normally have to make these parameters and
+assign initial values and other attributes.  To help you do this, each
+model has a :meth:`make_params` method that will generate parameters with
+the expected names:
+
+    >>> params = gmod.make_params()
 
-On creation of the model, the parameters are not initialized (the values
-are all ``None``), and will need to be given initial values before the
-model can be used.  This can be done in one of two ways, or a mixture of
-the two.  First, the initial values for the models parameters can be set
-explicitly, as with:
+This creates the :class:`Parameters` but doesn't necessarily give them
+initial values -- again, the model has no idea what the scale should be.
+You can set initial values for parameters with keyword arguments to
+:meth:`make_params`, as with:
 
-    >>> gmod.params['amp'].value = 10.0
 
-and so on.  This is also useful to setting parameter bounds and so forth.
-Alternatively, one can use the :meth:`eval` method (to evaluate the model)
-or the :meth:`fit` method (to fit data to this model) with explicit keyword
-arguments for the parameter values.  For example, one could use
-:meth:`eval` to calculate the predicted function::
+    >>> params = gmod.make_params(cen=5, amp=200, wid=1)
+
+or assign them (and other parameter properties) after the
+:class:`Parameters` has been created.
+
+A :class:`Model` has several methods associated with it.  For example, one
+can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
+method to fit data to this model with a :class:`Parameter` object.  Both of
+these methods can take explicit keyword arguments for the parameter values.
+For example, one could use :meth:`eval` to calculate the predicted
+function::
 
     >>> x = linspace(0, 10, 201)
     >>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
 
-So far, this is a slightly long-winded way to calculate a Gaussian
+Admittedly, this is a slightly long-winded way to calculate a Gaussian
 function.   But now that the model is set up, we can also use its
 :meth:`fit` method to fit this model to data, as with::
 
@@ -121,19 +139,28 @@ Putting everything together, the script to do such a fit (included in the
 
 .. literalinclude:: ../examples/doc_model1.py
 
-which is pretty compact and to the point.  Of course, the parameter in the
-returned ``result`` have pulled apart the covariance matrix, so that the
-results printed out are::
+which is pretty compact and to the point.  The returned ``result`` will be
+a :class:`ModelFit` object.  As we will see below, this has many
+components, including a :meth:`fit_report` method, which will show::
 
+    [[Model]]
+        gaussian
+    [[Fit Statistics]]
+        # function evals   = 33
+        # data points      = 101
+        # variables        = 3
+        chi-square         = 3.409
+        reduced chi-square = 0.035
     [[Variables]]
-         amp:     8.880218 +/- 0.1135949 (1.28%) initial =  5
-         cen:     5.658661 +/- 0.01030495 (0.18%) initial =  5
-         wid:     0.6976547 +/- 0.01030495 (1.48%) initial =  1
-    [[Correlations]] (unreported correlations are <  0.250)
-         C(amp, wid)                  =  0.577
-
+        amp:   8.88021829 +/- 0.113594 (1.28%) (init= 5)
+        cen:   5.65866102 +/- 0.010304 (0.18%) (init= 5)
+        wid:   0.69765468 +/- 0.010304 (1.48%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amp, wid)                  =  0.577
 
-and the plot generated gives:
+The result will also have :attr:`init_fit` for the fit with the initial
+parameter values and a :attr:`best_fit` for the fit with the best fit
+parameter values.  These can be used to generate the following plot:
 
 
 .. image:: _images/model_fit1.png
@@ -141,83 +168,60 @@ and the plot generated gives:
    :width: 50%
 
 which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit in black dashed line.
+the initial fit as a dashed black line.
 
 We emphasize here that the fit to this model function was really performed
-with 2 lines of code.  These lines clearly express that we want to turn the
-``gaussian`` function into a fitting model, and then fit the :math:`y(x)`
-data to this model, starting with values of 5 for ``amp``, 5 for ``cen``
-and 1 for ``wid``::
+with 2 lines of code::
 
     gmod = Model(gaussian)
     result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
 
-which compares well to :func:`scipy.optimize.curve_fit`::
+These lines clearly express that we want to turn the ``gaussian`` function
+into a fitting model, and then fit the :math:`y(x)` data to this model,
+starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``, and
+compare well to :func:`scipy.optimize.curve_fit`::
 
     best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
 
-except that all the other features of lmfit are included.
-
-Some model functions may be more complicated than the Gaussian function
-here.  We'll discuss these below, but for now we've shown that at least the
-wrapping of a simple model function for curve fitting is easy.
+except that all the other features of lmfit are included such as that the
+:class:`Parameters` can have bounds and constraints and the result is a
+richer object that can be reused to explore the fit in more detail.
 
+.. module:: model
 
 The :class:`Model` class
 =======================================
 
-.. module:: model
-
 The :class:`Model` class provides a general way to wrap a pre-defined
 function as a fitting model.
 
-.. class:: Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, components=None]]]]])
+.. class::  Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, name=None[, **kws]]]]]])
 
     Create a model based on the user-supplied function.  This uses
     introspection to automatically converting argument names of the
     function to Parameter names.
 
-    :param func: function to be wrapped
+    :param func: model function to be wrapped
     :type func: callable
     :param independent_vars: list of argument names to ``func`` that are independent variables.
     :type independent_vars: ``None`` (default) or list of strings.
     :param param_names: list of argument names to ``func`` that should be made into Parameters.
     :type param_names: ``None`` (default) or list of strings
     :param missing: how to handle missing values.
-    :type missing: one of ``None`` (default), 'drop', or 'raise'
+    :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
     :param prefix: prefix to add to all parameter names to distinguish components.
     :type prefix: string
-    :param components: list of model components for a composite fit (usually handled internally).
-    :type components: ``None`` or default.
+    :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
+    :type name: ``None`` or string.
+    :param kws:   additional keyword arguments to pass to model function.
 
 
-Methods and Attributes of the :class:`Model` class
-----------------------------------------------------
-
-.. method:: guess_starting_values(data, **kws)
-
-   by default this is left to raise a ``NotImplementedError``, but may be
-   overwritten by subclasses.  Generally, this method should take some
-   values for ``data`` and use it to construct reasonable starting values for
-   the parameters.
-
-.. method:: set_paramval(parname, value[, min=None[, max=None[, vary=True]]])
-
-   set the value for a named parameter.  This is convenient for setting
-   initial values.  The ``parname`` can include the models ``prefix`` or
-   not.
-
-   :param parname: parameter name.
-   :type parname: string
-   :param value: value for parameter
-   :type value: float
-   :param min:  lower bound for parameter value
-   :type min: ``None`` or float
-   :param max:  upper bound for parameter value
-   :type max: ``None`` or float
-   :param vary:  whether to vary parameter in fit.
-   :type vary: boolean
+    Of course, the model function will have to return an array that will be
+    the same size as the data being modeled.  Generally this is handled by
+    also specifying one or more independent variables.
 
+:class:`Model` class Methods
+---------------------------------
 
 .. method:: eval(params=None[, **kws])
 
@@ -225,19 +229,23 @@ Methods and Attributes of the :class:`Model` class
 
    :param params: parameters to use for fit.
    :type params: ``None`` (default) or Parameters
-
+   :param kws:    additional keyword arguments to pass to model function.
    :return:       ndarray for model given the parameters and other arguments.
 
-   If ``params`` is ``None``, the internal ``params`` will be used.
+   If ``params`` is ``None``, the values for all parameters are expected to
+   be provided as keyword arguments.  If ``params`` is given, and a keyword
+   argument for a parameter value is also given, the keyword argument will
+   be used.
 
-   Note that all other arguments for the model function (including all the
-   independent variables!) will need to be passed in using keyword
-   arguments.
+   Note that all non-parameter arguments for the model function --
+   **including all the independent variables!** -- will need to be passed
+   in using keyword arguments.
 
 
 .. method:: fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
 
-   perform a fit of the model to the ``data`` array.
+   perform a fit of the model to the ``data`` array with a set of
+   parameters.
 
    :param data: array of data to be fitted.
    :type data: ndarray-like
@@ -251,8 +259,10 @@ Methods and Attributes of the :class:`Model` class
    :type  scale_covar:  bool (default ``True``)
    :param iter_cb:  function to be called at each fit iteration
    :type  iter_cb:  callable or ``None``
-
-   :return:       fit result object.
+   :param verbose:  print a message when a new parameter is created due to a *hint*
+   :type  verbose:  bool (default ``True``)
+   :param kws:      additional keyword arguments to pass to model function.
+   :return:         :class:`ModelFit` object.
 
    If ``params`` is ``None``, the internal ``params`` will be used. If it
    is supplied, these will replace the internal ones.  If supplied,
@@ -263,57 +273,79 @@ Methods and Attributes of the :class:`Model` class
    independent variables!) will need to be passed in using keyword
    arguments.
 
-   The result returned from :meth:`fit` will contains all of the items
-   returned from :func:`minimize` (see  :ref:`Table of Fit Results
-   <goodfit-table>` plus those listed in the :ref:`Table of Model Fit results <modelfit-table>`
 
-.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+.. method:: guess(data, **kws)
 
-   return result of :func:`fit_report` after completing :meth:`fit`.
+   Guess starting values for model parameters.
 
+    :param data: data array used to guess parameter values
+    :type data:  ndarray
+    :param kws:  additional options to pass to model function.
+    :return: :class:`Parameters` with guessed initial values for each parameter.
 
-.. _modelfit-table:
+   by default this is left to raise a ``NotImplementedError``, but may be
+   overwritten by subclasses.  Generally, this method should take some
+   values for ``data`` and use it to construct reasonable starting values for
+   the parameters.
 
-Table of Model Fit Results: These values are included in the return value
-from :meth:`Model.fit`, in addition to the standard Goodness-of-Fit
-statistics and fit results given in :ref:`Table of Fit Results
-<goodfit-table>`.
 
-   +----------------------------+------------------------------------------------------+
-   | result attribute           |  Description / Formula                               |
-   +============================+======================================================+
-   | ``init_params``            | initial set of parameters                            |
-   +----------------------------+------------------------------------------------------+
-   | ``init_fit``               | initial estimate of fit to data                      |
-   +----------------------------+------------------------------------------------------+
-   | ``best_fit``               | final estimate of fit to data                        |
-   +----------------------------+------------------------------------------------------+
+.. method:: make_params(**kws)
 
+   Create a set of parameters for model.
 
-.. attribute:: independent_vars
+    :param kws:  optional keyword/value pairs to set initial values for parameters.
+    :return: :class:`Parameters`.
 
-   list of strings for independent variables.
+    The parameters may or may not have decent initial values for each
+    parameter.
 
-.. attribute:: param_names
 
-   list of strings of parameter names.
+.. method:: set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
 
-.. attribute:: params
+   set *hints* to use when creating parameters with :meth:`make_params` for
+   the named parameter.  This is especially convenient for setting initial
+   values.  The ``name`` can include the model's ``prefix`` or not.
 
-   :class:`Parameters` object for the model
+   :param name: parameter name.
+   :type name: string
+   :param value: value for parameter
+   :type value: float
+   :param min:  lower bound for parameter value
+   :type min: ``None`` or float
+   :param max:  upper bound for parameter value
+   :type max: ``None`` or float
+   :param vary:  whether to vary parameter in fit.
+   :type vary: boolean
+   :param expr:  mathematical expression for constraint
+   :type expr: string
 
-.. attribute:: prefix
+   See :ref:`model_param_hints_section`.
 
-   prefix used for name-mangling of parameter names.  The default is ''.
-   If a particular :class:`Model` has arguments ``amplitude``,
-   ``center``, and ``sigma``, these would become the parameter names.
-   Using a prefix of ``g1_`` would convert these parameter names to
-   ``g1_amplitude``, ``g1_center``, and ``g1_sigma``.   This can be
-   essential to avoid name collision in composite models.
+:class:`Model` class Attributes
+---------------------------------
+
+.. attribute:: components
+
+   a list of instances of :class:`Model` that make up a *composite model*.
+   See :ref:`composite_models_section`.  Normally, you will not need to use
+   this, but is used by :class:`Model` itself when constructing a composite
+   model from two or more models.
+
+.. attribute:: func
+
+   The model function used to calculate the model.
+
+.. attribute:: independent_vars
+
+   list of strings for names of the independent variables.
+
+.. attribute:: is_composite
+
+   Boolean value for whether model is a composite model.
 
 .. attribute:: missing
 
-   what to do for missing values.  The choices are
+   describes what to do for missing values.  The choices are
 
     * ``None``: Do not check for null or missing values (default)
     * ``'none'``: Do not check for null or missing values.
@@ -322,11 +354,32 @@ statistics and fit results given in :ref:`Table of Fit Results
     * ``'raise'``: Raise a (more helpful) exception when data contains null
                   or missing values.
 
-.. attribute:: components
+.. attribute:: name
 
-   a list of instances of :class:`Model` that make up a composite model.
-   Normally, you will not need to use this, but is used my :class:`Model`
-   itself when constructing a composite model (that is adding models together).
+   name of the model, used only in the string representation of the
+   model. By default this will be taken from the model function.
+
+.. attribute:: opts
+
+   extra keyword arguments to pass to model function.  Normally this will
+   be determined internally and should not be changed.
+
+.. attribute:: param_hints
+
+   Dictionary of parameter hints.  See :ref:`model_param_hints_section`.
+
+.. attribute:: param_names
+
+   list of strings of parameter names.
+
+.. attribute:: prefix
+
+   prefix used for name-mangling of parameter names.  The default is ''.
+   If a particular :class:`Model` has arguments ``amplitude``,
+   ``center``, and ``sigma``, these would become the parameter names.
+   Using a prefix of ``g1_`` would convert these parameter names to
+   ``g1_amplitude``, ``g1_center``, and ``g1_sigma``.   This can be
+   essential to avoid name collision in composite models.
 
 
 Determining parameter names and independent variables for a function
@@ -351,20 +404,11 @@ on Parameters, or fix their values.
 
 
 
-More Details on building models from functions
-============================================================
-
-
-Here we explore some of the variations of building a :class:`Model` from a
-user-defined function that didn't get mentioned in the example above for
-the Gaussian model.
-
-
 Explicitly specifying ``independent_vars``
 -------------------------------------------------
 
-As for the example above of the Gaussian model, creating a :class:`Model`
-from a function is fairly easy::
+As we saw for the Gaussian example above, creating a :class:`Model` from a
+function is fairly easy. Let's try another::
 
     >>> def decay(t, tau, N):
     ...    return N*np.exp(-t/tau)
@@ -378,12 +422,12 @@ from a function is fairly easy::
     tau <Parameter 'tau', None, bounds=[None:None]>
     N <Parameter 'N', None, bounds=[None:None]>
 
-Here, ``t`` is assumed to be the independent variable because it comes
-first, and that the other function arguments are used to create the
-remaining parameters are created from the other parameters.
+Here, ``t`` is assumed to be the independent variable because it is the
+first argument to the function.  The other function arguments are used to
+create parameters for the model.
 
-If you wanted ``tau`` to be the independent variable in the above example,
-you would just do this::
+If you want ``tau`` to be the independent variable in the above example,
+you can say so::
 
     >>> decay_model = Model(decay, independent_vars=['tau'])
     >>> print decay_model.independent_vars
@@ -395,12 +439,27 @@ you would just do this::
     N <Parameter 'N', None, bounds=[None:None]>
 
 
+You can also supply multiple values for multi-dimensional functions with
+multiple independent variables.  In fact, the meaning of *independent
+variable* here is simple, and based on how it treats arguments of the
+function you are modeling:
+
+independent variable
+    a function argument that is not a parameter or otherwise part of the
+    model, and that will be required to be explicitly provided as a
+    keyword argument for each fit with :meth:`fit` or evaluation
+    with :meth:`eval`.
+
+Note that independent variables are not required to be arrays, or even
+floating point numbers.
+
+
 Functions with keyword arguments
 -----------------------------------------
 
 If the model function had keyword parameters, these would be turned into
 Parameters if the supplied default value was a valid number (but not
-``None``).
+``None``, ``True``, or ``False``).
 
     >>> def decay2(t, tau, N=10, check_positive=False):
     ...    if check_small:
@@ -421,7 +480,10 @@ into a parameter, with the default numerical value as its initial value.
 By default, it is permitted to be varied in the fit -- the 10 is taken as
 an initial value, not a fixed value.  On the other hand, the
 ``check_positive`` keyword argument, was not converted to a parameter
-because it has a boolean default value.
+because it has a boolean default value.    In some sense,
+``check_positive`` becomes like an independent variable to the model.
+However, because it has a default value it is not required to be given for
+each model evaluation or fit, as independent variables are.
 
 Defining a ``prefix`` for the Parameters
 --------------------------------------------
@@ -449,46 +511,292 @@ You would refer to these parameters as ``f1_amplitude`` and so forth, and
 the model will know to map these to the ``amplitude`` argument of ``myfunc``.
 
 
-More on initialing model parameters
+Initializing model parameters
 -----------------------------------------
 
-As mentioned above, the parameters created by :class:`Model` are generally
-created with invalid initial values of ``None``.  These values must be
-initialized in order for the model to be evaluated or used in a fit.  There
-are three ways to do this initialization that can be used in any
-combination:
+As mentioned above, the parameters created by :meth:`Model.make_params` are
+generally created with invalid initial values of ``None``.  These values
+**must** be initialized in order for the model to be evaluated or used in a
+fit.  There are four different ways to do this initialization that can be
+used in any combination:
 
   1. You can supply initial values in the definition of the model function.
-  2. You can initialize the parameters after the model has been created.
-  3. You can supply initial values for the parameters to the :meth:`eval`
-     or :meth:`fit` methods.
+  2. You can initialize the parameters when creating parameters with :meth:`make_params`.
+  3. You can give parameter hints with :meth:`set_param_hint`.
+  4. You can supply initial values for the parameters when you use the
+     :meth:`eval` or :meth:`fit` methods.
+
+Of course these methods can be mixed, allowing you to overwrite initial
+values at any point in the process of defining and using the model.
+
+Initializing values in the function definition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-For option 1, consider doing::
+To supply initial values for parameters in the definition of the model
+function, you can simply supply a default value::
 
     >>> def myfunc(x, a=1, b=0):
     >>>     ...
 
-instead of::
+instead of using::
 
     >>> def myfunc(x, a, b):
     >>>     ...
 
-For option 2, you can do::
+This has the advantage of working at the function level -- all parameters
+with keywords can be treated as options.  It also means that some default
+initial value will always be available for the parameter.
+
+
+Initializing values with :meth:`make_params`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When creating parameters with :meth:`make_params` you can specify initial
+values.  To do this, use keyword arguments for the parameter names and
+initial values::
+
+    >>> mod = Model(myfunc)
+    >>> pars = mod.make_params(a=3, b=0.5)
+
+
+Initializing values by setting parameter hints
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After a model has been created, but prior to creating parameters with
+:meth:`make_params`, you can set parameter hints.  These allows you to set
+not only a default initial value but also to set other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression.  To set a parameter hint, you can use :meth:`set_param_hint`,
+as with::
+
+    >>> mod = Model(myfunc)
+    >>> mod.set_param_hint('a', value = 1.0)
+    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+    >>> pars = mod.make_params()
+
+Parameter hints are discussed in more detail in section
+:ref:`model_param_hints_section`.
+
+
+Initializing values when using a model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally, you can explicitly supply initial values when using a model.  That
+is, as with :meth:`make_params`, you can include values
+as keyword arguments to either the :meth:`eval` or :meth:`fit` methods::
+
+   >>> y1 = mod.eval(x=x, a=7.0, b=-2.0)
+
+   >>> out = mod.fit(data, pars, x=x, a=3.0, b=-0.0)
+
+These approaches to initialization provide many opportunities for setting
+initial values for parameters.  The methods can be combined, so that you
+can set parameter hints but then change the initial value explicitly with
+:meth:`fit`.
+
+.. _model_param_hints_section:
+
+Using parameter hints
+--------------------------------
+
+
+After a model has been created, you can give it hints for how to create
+parameters with :meth:`make_params`.  This allows you to set not only a
+default initial value but also to set other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression.   To set a parameter hint, you can use :meth:`set_param_hint`,
+as with::
 
     >>> mod = Model(myfunc)
-    >>> mod.params['a'].value = 1.0
-    >>> mod.params['b'].value = 0.1
+    >>> mod.set_param_hint('a', value = 1.0)
+    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+
+Parameter hints are stored in a model's :attr:`param_hints` attribute,
+which is simply a nested dictionary::
+
+    >>> print mod.param_hints
+    {'a': {'value': 1}, 'b': {'max': 1.0, 'value': 0.3, 'min': 0}}
+
+
+You can change this dictionary directly, or with the :meth:`set_param_hint`
+method.  Either way, these parameter hints are used by :meth:`make_params`
+when making parameters.
+
+An important feature of parameter hints is that you can force the creation
+of new parameters with parameter hints.  This can be useful to make derived
+parameters with constraint expressions.  For example to get the full-width
+at half maximum of a Gaussian model, one could use a parameter hint of::
+
+    >>> mod = Model(gaussian)
+    >>> mod.set_param_hint('fwhm', expr='2.3548*sigma')
+
+
+
+The :class:`ModelFit` class
+=======================================
+
+A :class:`ModelFit` is the object returned by :meth:`Model.fit`.  It is a
+subclass of :class:`Minimizer`, and so contains many of the fit results.
+Of course, it knows the :class:`Model` and the set of :class:`Parameters`
+used in the fit, and it has methods to evaluate the model, to fit the data
+(or re-fit the data with changes to the parameters, or fit with different
+or modified data) and to print out a report for that fit.
+
+While a :class:`Model` encapsulates your model function, it is fairly
+abstract and does not contain the parameters or data used in a particular
+fit.  A :class:`ModelFit` *does* contain parameters and data as well as
+methods to alter and re-do fits.  Thus the :class:`Model` is the idealized
+model while the :class:`ModelFit` is the messier, more complex (but perhaps
+more useful) object that represents a fit with a set of parameters to data
+with a model.
+
+A :class:`ModelFit` has several attributes holding values for fit results,
+and several methods for working with fits.
+
+:class:`ModelFit` methods
+---------------------------------
+
+These methods are all inherited from :class:`Minimize` or from
+:class:`Model`.
+
+.. method:: eval(**kwargs)
+
+   evaluate the model using the best-fit parameters and supplied
+   independent variables.  The ``**kwargs`` arguments can be used to update
+   parameter values and/or independent variables.
+
+.. method:: fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
+
+   fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
+   or ``method``, or changing the independent variable(s) with the
+   ``**kwargs`` argument.  See :meth:`Model.fit` for argument
+   descriptions, and note that any value of ``None`` defaults to the last
+   used value.
+
+.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+
+   return a printable fit report for the fit with fit statistics, best-fit
+   values with uncertainties and correlations.  As with :func:`fit_report`.
+
+   :param modelpars:    Parameters with "Known Values" (optional, default None)
+   :param show_correl:  whether to show list of sorted correlations [``True``]
+   :param min_correl:   smallest correlation absolute value to show [0.1]
+
+
+
+
+:class:`ModelFit` attributes
+---------------------------------
+
+.. attribute:: best_fit
+
+   ndarray result of model function, evaluated at provided
+   independent variables and with best-fit parameters.
+
+.. attribute:: best_values
+
+   dictionary with  parameter names as keys, and best-fit values as values.
+
+.. attribute:: chisqr
+
+   floating point best-fit chi-square statistic.
+
+.. attribute:: covar
+
+   ndarray (square) covariance matrix returned from fit.
+
+.. attribute:: data
+
+   ndarray of data to compare to model.
+
+.. attribute:: errorbars
+
+   boolean for whether error bars were estimated by fit.
+
+.. attribute::  ier
+
+   integer returned code from :func:`scipy.optimize.leastsq`.
+
+.. attribute:: init_fit
+
+   ndarray result of model function, evaluated at provided
+   independent variables and with initial parameters.
+
+.. attribute:: init_params
+
+   initial parameters.
+
+.. attribute:: init_values
+
+   dictionary with  parameter names as keys, and initial values as values.
 
-An advantage of this approach is that you can set other parameter
-attributes such as bounds and constraints.
+.. attribute:: iter_cb
 
-For option 3, give explicit initial values for the parameters:
+   optional callable function, to be called at each fit iteration.  This
+   must take arguments of ``params, iter, resid, *args, **kws``, where
+   ``params`` will have the current parameter values, ``iter`` the
+   iteration, ``resid`` the current residual array, and ``*args`` and
+   ``**kws`` as passed to the objective function.
 
-   >>> y1 = mod.eval(x=x, a=1, b=3)
+.. attribute:: jacfcn
 
-Again, these methods can be combined.  For example, you can set parameter
-values and bounds as with option 2, but then change the initial value with
-option 3.
+   optional callable function, to be called to calculate jacobian array.
+
+.. attribute::  lmdif_message
+
+   string message returned from :func:`scipy.optimize.leastsq`.
+
+.. attribute::  message
+
+   string message returned from :func:`minimize`.
+
+.. attribute::  method
+
+   string naming fitting method for :func:`minimize`.
+
+.. attribute::  model
+
+   instance of :class:`Model` used for model.
+
+.. attribute::  ndata
+
+    integer number of data points.
+
+.. attribute::  nfev
+
+    integer number of function evaluations used for fit.
+
+.. attribute::  nfree
+
+    integer number of free parameters in fit.
+
+.. attribute::  nvarys
+
+    integer number of independent, freely varying variables in fit.
+
+.. attribute::  params
+
+    Parameters used in fit.  Will have best-fit values.
+
+.. attribute::  redchi
+
+    floating point reduced chi-square statistic
+
+.. attribute::  residual
+
+   ndarray for residual.
+
+.. attribute::  scale_covar
+
+   boolean flag for whether to automatically scale covariance matrix.
+
+.. attribute:: success
+
+   boolean value of whether fit succeeded.
+
+.. attribute:: weights
+
+   ndarray (or ``None``) of weighting values used in fit.
 
 
 .. _composite_models_section:
@@ -496,7 +804,6 @@ option 3.
 Creating composite models
 =============================
 
-
 One of the most interesting features of the :class:`Model` class is that
 models can be added together to give a composite model, with parameters
 from the component models all being available to influence the total sum of
@@ -537,7 +844,10 @@ This model has parameters for both component models, and can be used as:
 
 which prints out the results::
 
-
+    [[Model]]
+     Composite Model:
+        gaussian
+        line
     [[Fit Statistics]]
         # function evals   = 44
         # data points      = 101
@@ -545,28 +855,94 @@ which prints out the results::
         chi-square         = 2.579
         reduced chi-square = 0.027
     [[Variables]]
-         amp:           8.459311 +/- 0.1241451 (1.47%) initial =  5
-         cen:           5.655479 +/- 0.009176784 (0.16%) initial =  5
-         intercept:    -0.968602 +/- 0.03352202 (3.46%) initial =  1
-         slope:         0.264844 +/- 0.005748921 (2.17%) initial =  0
-         wid:           0.6754552 +/- 0.009916862 (1.47%) initial =  1
+        amp:         8.45931061 +/- 0.124145 (1.47%) (init= 5)
+        cen:         5.65547872 +/- 0.009176 (0.16%) (init= 5)
+        intercept:  -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+        slope:       0.26484403 +/- 0.005748 (2.17%) (init= 0)
+        wid:         0.67545523 +/- 0.009916 (1.47%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
         C(amp, wid)                  =  0.666
         C(cen, intercept)            =  0.129
 
-and shows the plot:
 
-.. image:: _images/model_fit2.png
-   :target: _images/model_fit2.png
-   :width: 50%
+and shows the plot on the left.
 
+.. _figModel2:
 
-which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit in black dashed line.
+  .. image:: _images/model_fit2.png
+     :target: _images/model_fit2.png
+     :width: 48%
+  .. image:: _images/model_fit2a.png
+     :target: _images/model_fit2a.png
+     :width: 48%
+
+
+On the left, data is shown in blue dots, the total fit is shown in solid
+red line, and the initial fit is shown as a black dashed line.  In the
+figure on the right, the data is again shown in blue dots, and the Gaussian
+component shown as a black dashed line, and the linear component shown as a
+red dashed line.  These components were generated after the fit using the
+Models :meth:`eval` method::
+
+
+    comp_gauss = mod.components[0].eval(x=x)
+    comp_line  = mod.components[1].eval(x=x)
+
+
+Note that we have to pass in ``x`` here, but not any of the final values
+for the parameters -- the current values for ``mod.params`` will be used,
+and these will be the best-fit values after a fit.  While the model does
+store the best parameters and the estimate of the data in ``mod.best_fit``,
+it does not actually store the data it fit to or the independent variables
+-- here, ``x`` for that data.  That means you can easily apply this model
+to other data sets, or evaluate the model at other values of ``x``.   You
+may want to do this to give a finer or coarser spacing of data points,  or to
+extrapolate the model outside the fitting range.    This can be done with::
+
+    xwide = np.linspace(-5, 25, 3001)
+    predicted = mod.eval(x=xwide)
+
+
+A final note: In this example, the argument names for the model functions
+do not overlap.  If they had, the ``prefix`` argument to :class:`Model`
+would have allowed us to identify which parameter went with which component
+model.  As we will see in the next chapter, using composite models with the
+built-in models provides a simple way to build up complex models.
+
+Model names for composite models
+-----------------------------------------
+
+By default a `Model` object has a `name` attribute containing the name of
+the model function. This name can be overridden when building a model::
+
+    my_model = Model(gaussian, name='my_gaussian')
+
+or by assigning the `name` attribute::
+
+    my_model = Model(gaussian)
+    my_model.name = 'my_gaussian'
+
+This name is used in the object representation (for example when printing)::
+
+    <lmfit.Model: my_gaussian>
+
+A composite model will have the name `'composite_func'` by default, but as
+noted, we can overwrite it with a more meaningful string. This can be useful
+when dealing with multiple models.
+
+For example, let's assume we want to fit some bi-modal data. We initially try
+two Gaussian peaks::
+
+    model = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_')
+    model.name = '2-Gaussians model'
+
+Here, instead of the standard name `'composite_func'`, we assigned a more
+meaningful name. Now, if we want to also fit with two Lorentzian peaks
+we can do similarly::
+
+    model2 = LorentzianModel(prefix='p1_') + LorentzianModel(prefix='p2_')
+    model2.name = '2-Lorentzians model'
 
-In this example, the argument names for the model functions do not overlap.
-If they had, the ``prefix`` argument to :class:`Model` would have allowed
-us to identify which parameter went with which component model.  As we will
-see in the next chapter, using composite models with the built-in models
-provides a simple way to build up complex models.
+It is evident that assigning names will help to easily distinguish
+the different models.
 
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 57c3296..be2f64f 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -4,10 +4,33 @@
 :class:`Parameter`  and :class:`Parameters`
 ================================================
 
-This chapter describes :class:`Parameter` objects, which are
-fundamental to the lmfit approach to optimization.   Most real use cases
-will use the :class:`Parameters` class, which provides an (ordered)
-dictionary of :class:`Parameter` objects.
+This chapter describes :class:`Parameter` objects, which are the key concept
+of lmfit.  A :class:`Parameter` is the quantity to be optimized in all
+minimization problems, replacing the plain floating point number used in
+the optimization routines from :mod:`scipy.optimize`.  A :class:`Parameter`
+has a value that can be varied in the fit, fixed, have upper and/or lower
+bounds.  It can even have a value that is constrained by an algebraic
+expression of other Parameter values.  Since :class:`Parameters` live
+outside the core optimization routines, they can be used in **all**
+optimization routines from :mod:`scipy.optimize`.  By using
+:class:`Parameter` objects instead of plain variables, the objective
+function does not have to be modified to reflect every change of what is
+varied in the fit.  This simplifies the writing of models, allowing general
+models that describe the phenomenon to be written, and gives the user more
+flexibility in using and testing variations of that model.
+
+Whereas a :class:`Parameter` expands on an individual floating point
+variable, the optimization methods need an ordered group of floating point
+variables.  In the :mod:`scipy.optimize` routines this is required to be a
+1-dimensional numpy ndarray.  For lmfit, where each :class:`Parameter` has
+a name, this is replaced by a :class:`Parameters` class, which works as an
+ordered dictionary of :class:`Parameter` objects, with a few additional
+features and methods.  That is, while the concept of a :class:`Parameter`
+is central to lmfit, one normally creates and interacts with a
+:class:`Parameters` instance that contains many :class:`Parameter`
+objects.  The objective functions you write will take an instance of
+:class:`Parameters` as their first argument.
+
 
 The :class:`Parameter` class
 ========================================
@@ -20,7 +43,7 @@ The :class:`Parameter` class
    :type name: ``None`` or string -- will be overwritten during fit if ``None``.
    :param value: the numerical value for the parameter
    :param vary:  whether to vary the parameter or not.
-   :type vary:  boolean (``True``/``False``)
+   :type vary:  boolean (``True``/``False``) [default ``True``]
    :param min:  lower bound for value (``None`` = no lower bound).
    :param max:  upper bound for value (``None`` = no upper bound).
    :param expr:  mathematical expression to use to evaluate value during fit.
@@ -29,9 +52,10 @@ The :class:`Parameter` class
 
 Each of these inputs is turned into an attribute of the same name.
 
-After a fit, a Parameter for a fitted variable (ie with vary = ``True``)
-will have the :attr:`value` attribute holding the best-fit value, and may
-(depending on the success of the fit) have obtain additional attributes.
+After a fit, a Parameter for a fitted variable (that is with vary =
+``True``) will have the :attr:`value` attribute holding the best-fit value.
+Depending on the success of the fit and fitting algorithm used, it may also
+have attributes :attr:`stderr` and :attr:`correl`.
 
 .. attribute:: stderr
 
@@ -44,14 +68,61 @@ will have the :attr:`value` attribute holding the best-fit value, and may
 
    {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
 
-For details of the use of the bounds :attr:`min` and :attr:`max`,
-see :ref:`bounds_chapter`.
+See :ref:`bounds_chapter` for details on the math used to implement the
+bounds with :attr:`min` and :attr:`max`.
 
 The :attr:`expr` attribute can contain a mathematical expression that will
 be used to compute the value for the Parameter at each step in the fit.
 See :ref:`constraints_chapter` for more details and examples of this
 feature.
 
+.. index:: Removing a Constraint Expression
+
+.. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]])
+
+   set or update a Parameter's value or other attributes.
+
+   :param name:  parameter name
+   :param value: the numerical value for the parameter
+   :param vary:  whether to vary the parameter or not.
+   :param min:   lower bound for value
+   :param max:   upper bound for value
+   :param expr:  mathematical expression to use to evaluate value during fit.
+
+   Each argument of :meth:`set` has a default value of ``None``, and will
+   be set only if the provided value is not ``None``.  You can use this to
+   update some Parameter attribute without affecting others, for example::
+
+       p1 = Parameter('a', value=2.0)
+       p2 = Parameter('b', value=0.0)
+       p1.set(min=0)
+       p2.set(vary=False)
+
+   to set a lower bound, or to set a Parameter as having a fixed value.
+
+   Note that to use this approach to lift a lower or upper bound, doing::
+
+       p1.set(min=0)
+       .....
+       # now lift the lower bound
+       p1.set(min=None)   # won't work!  lower bound NOT changed
+
+   won't work -- this will not change the current lower bound.  Instead
+   you'll have to use ``np.inf`` to remove a lower or upper bound::
+
+       # now lift the lower bound
+       p1.set(min=-np.inf)   # will work!
+
+   Similarly, to clear an expression of a parameter, you need to pass an
+   empty string, not ``None``.  You also need to give a value and
+   explicitly tell it to vary::
+
+       p3 = Parameter('c', expr='(a+b)/2')
+       p3.set(expr=None)     # won't work!  expression NOT changed
+
+       # remove constraint expression
+       p3.set(value=1.0, vary=True, expr='')  # will work!  parameter now unconstrained
+
 
 The :class:`Parameters` class
 ========================================
@@ -68,11 +139,12 @@ The :class:`Parameters` class
    2. values must be valid :class:`Parameter` objects.
 
 
-   Two methods for provided for convenience of initializing Parameters.
+   Two methods are provided for convenient initialization of a :class:`Parameters`,
+   and one for extracting :class:`Parameter` values into a plain dictionary.
 
 .. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
 
-   add a named parameter.  This simply creates a :class:`Parameter`
+   add a named parameter.  This creates a :class:`Parameter`
    object associated with the key `name`, with optional arguments
    passed to :class:`Parameter`::
 
@@ -86,9 +158,9 @@ The :class:`Parameters` class
 
         name, value, vary, min, max, expr
 
-   That is, this method is somewhat rigid and verbose (no default values),
-   but can be useful when initially defining a parameter list so that it
-   looks table-like::
+   This method is somewhat rigid and verbose (no default values), but can
+   be useful when initially defining a parameter list so that it looks
+   table-like::
 
      p = Parameters()
      #           (Name,  Value,  Vary,   Min,  Max,  Expr)
@@ -100,12 +172,35 @@ The :class:`Parameters` class
                 ('wid2',  None, False, None, None, '2*wid1/3'))
 
 
+.. method:: valuesdict(self)
+
+   return an ordered dictionary of name:value pairs containing the
+   :attr:`name` and :attr:`value` of a Parameter.
+
+   This is distinct from the :class:`Parameters` itself, as the dictionary
+   values are not :class:`Parameter` objects, just the :attr:`value`.
+   This can be a very convenient way to get updated values in an objective
+   function.
+
+
 Simple Example
 ==================
 
-Putting it all together, a simple example of using a dictionary of
-:class:`Parameter` objects and :func:`minimize` might look like this:
 Using :class:`Parameters` and :func:`minimize` function (discussed in the
+next chapter) might look like this:
 
 .. literalinclude:: ../examples/doc_basic.py
 
 
+Here, the objective function explicitly unpacks each Parameter value.  This
+can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
+which would make the objective function ``fcn2min`` above look like::
+
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        v = params.valuesdict()
+
+        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+        return model - data
+
+The results are identical, and the difference is a stylistic choice.
diff --git a/doc/sphinx/ext_mathjax.py b/doc/sphinx/ext_mathjax.py
new file mode 100644
index 0000000..40de659
--- /dev/null
+++ b/doc/sphinx/ext_mathjax.py
@@ -0,0 +1,10 @@
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/sphinx/ext_pngmath.py b/doc/sphinx/ext_pngmath.py
new file mode 100644
index 0000000..cf153fe
--- /dev/null
+++ b/doc/sphinx/ext_pngmath.py
@@ -0,0 +1,10 @@
+# sphinx extensions for pngmath
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(pngmath)
diff --git a/doc/sphinx/mathjax/conf.py b/doc/sphinx/mathjax/conf.py
deleted file mode 100644
index 451458e..0000000
--- a/doc/sphinx/mathjax/conf.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# lmfit documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
-sys.path.append(os.path.abspath(os.path.join('.')))
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.mathjax',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
-
-try:
-    import IPython.sphinxext.ipython_directive
-    extensions.extend(['IPython.sphinxext.ipython_directive',
-                       'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
-    pass
-
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
-                       'numpy': ('http://scipy.org/docs/numpy/', None),
-                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
-
-intersphinx_cache_limit = 10
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-try:
-    import lmfit
-    release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
-    release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-html_theme_path = ['sphinx/theme']
-html_theme = 'lmfitdoc'
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Minimization and Curve-Fitting for Python'
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-
-html_use_modindex = False
-#html_use_index = True
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'lmfitdoc'
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'lmfit.tex',
-   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
-   'Matthew Newville, Till Stensitzki, and others', 'manual'),
-]
-
diff --git a/doc/sphinx/pngmath/conf.py b/doc/sphinx/pngmath/conf.py
deleted file mode 100644
index 2fb8e92..0000000
--- a/doc/sphinx/pngmath/conf.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# lmfit documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
-sys.path.append(os.path.abspath(os.path.join('.')))
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.pngmath',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
-
-try:
-    import IPython.sphinxext.ipython_directive
-    extensions.extend(['IPython.sphinxext.ipython_directive',
-                       'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
-    pass
-
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
-                       'numpy': ('http://scipy.org/docs/numpy/', None),
-                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
-
-intersphinx_cache_limit = 10
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-try:
-    import lmfit
-    release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
-    release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-html_theme_path = ['sphinx/theme']
-html_theme = 'lmfitdoc'
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Minimization and Curve-Fitting for Python'
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-
-html_use_modindex = False
-#html_use_index = True
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'lmfitdoc'
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'lmfit.tex',
-   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
-   'Matthew Newville, Till Stensitzki, and others', 'manual'),
-]
-
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
index 5bf78eb..0681d42 100644
--- a/doc/sphinx/theme/lmfitdoc/layout.html
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -9,6 +9,63 @@
 #}
 {%- extends "basic/layout.html" %}
 
+{%- block extrahead %}
+  <script type="text/x-mathjax-config">
+     MathJax.Hub.Config({
+        "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
+        "HTML-CSS": {scale: 90}
+  });</script>
+{% endblock %}
+
+
+
+{% block rootrellink %}
+   <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
+   <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
+   <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
+   <li><a href="{{ pathto('model') }}"> model</a>|</li>
+   <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
+   <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
+   <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
+   <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
+{% endblock %}
+
+{% block relbar1 %}
+<div>
+<table border=0>
+  <tr><td></td><td width=75% padding=5 align=left>
+       <a href="index.html" style="color: #157"> <font size=+3>LMFIT</font></a>
+     </td><td></td>
+     <td width=8% align=left>
+         <a href="contents.html" style="color: #882222">
+         <font size+=1>Contents</font></a> </td>
+     <td width=8% align=left>
+          <a href="installation.html" style="color: #882222">
+          <font size+=1>Download</font></a></td>
+     <td width=8% align=left>
+        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
+         <font size+=1>Develop</font></a></td>
+  </tr>
+  <tr><td></td><td width=75% padding=5 align=left>
+        <a href="index.html" style="color: #157"> <font size=+2>
+	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
+     </td><td></td>
+     <td width=8% align=left>
+         <a href="intro.html" style="color: #882222">
+         <font size+=1>Introduction</font></a> </td>
+     <td width=8% align=left>
+         <a href="parameters.html" style="color: #882222">
+         <font size+=1>Parameters</font></a> </td>
+     <td width=8% align=left>
+         <a href="model.html" style="color: #882222">
+         <font size+=1>Models</font></a> </td>
+
+  </tr>
+</table>
+</div>
+{{ super() }}
+{% endblock %}
+
 {# put the sidebar before the body #}
 {% block sidebar1 %}{{ sidebar() }}{% endblock %}
 {% block sidebar2 %}{% endblock %}
diff --git a/examples/example_ci.py b/examples/confidence_interval.py
similarity index 100%
rename from examples/example_ci.py
rename to examples/confidence_interval.py
diff --git a/examples/example_ci2.py b/examples/confidence_interval2.py
similarity index 100%
rename from examples/example_ci2.py
rename to examples/confidence_interval2.py
diff --git a/examples/doc_basic_valuesdict.py b/examples/doc_basic_valuesdict.py
new file mode 100644
index 0000000..8073106
--- /dev/null
+++ b/examples/doc_basic_valuesdict.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+#<examples/doc_basic.py>
+from lmfit import minimize, Parameters, Parameter, report_fit
+import numpy as np
+
+# create data to be fitted
+x = np.linspace(0, 15, 301)
+data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+        np.random.normal(size=len(x), scale=0.2) )
+
+# define objective function: returns the array to be minimized
+def fcn2min(params, x, data):
+    """ model decaying sine wave, subtract data"""
+    v = params.valuesdict()
+    
+    model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+    return model - data
+
+# create a set of Parameters
+params = Parameters()
+params.add('amp',   value= 10,  min=0)
+params.add('decay', value= 0.1)
+params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
+params.add('omega', value= 3.0)
+
+
+# do fit, here with leastsq model
+result = minimize(fcn2min, params, args=(x, data))
+
+# calculate final result
+final = data + result.residual
+
+# write error report
+report_fit(params)
+
+# try to plot results
+try:
+    import pylab
+    pylab.plot(x, data, 'k+')
+    pylab.plot(x, final, 'r')
+    pylab.show()
+except:
+    pass
+
+#<end of examples/doc_basic.py>
diff --git a/examples/doc_confidence1.py b/examples/doc_confidence1.py
new file mode 100644
index 0000000..81e415e
--- /dev/null
+++ b/examples/doc_confidence1.py
@@ -0,0 +1,24 @@
+import lmfit
+import numpy as np
+
+x = np.linspace(0.3,10,100)
+np.random.seed(0)
+
+y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+
+p = lmfit.Parameters()
+p.add_many(('a', 0.1), ('b', 1))
+
+def residual(p):
+   a = p['a'].value
+   b = p['b'].value
+
+   return 1/(a*x)+b-y
+
+mi = lmfit.minimize(residual, p)
+lmfit.printfuncs.report_fit(mi.params)
+
+ci = lmfit.conf_interval(mi)
+lmfit.printfuncs.report_ci(ci)
+
+
diff --git a/examples/doc_confidence2.py b/examples/doc_confidence2.py
new file mode 100644
index 0000000..489df88
--- /dev/null
+++ b/examples/doc_confidence2.py
@@ -0,0 +1,61 @@
+import lmfit
+import numpy as np
+import matplotlib
+matplotlib.use('WXAgg')
+
+import matplotlib.pyplot as plt
+
+
+x = np.linspace(1, 10, 250)
+np.random.seed(0)
+y = 3.0*np.exp(-x/2) -5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(len(x))
+
+p = lmfit.Parameters()
+p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))
+
+def residual(p):
+   v = p.valuesdict()
+   return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1)/v['t2'])-y
+
+# first solve with Nelder-Mead
+mi = lmfit.minimize(residual, p, method='Nelder')
+
+mi = lmfit.minimize(residual, p)
+
+lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+
+ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
+lmfit.printfuncs.report_ci(ci)
+
+plot_type = 3
+if plot_type == 0:
+    plt.plot(x, y)
+    plt.plot(x, residual(p)+y )
+
+elif plot_type == 1:
+    cx, cy, grid = lmfit.conf_interval2d(mi,'a2','t2',30,30)
+    plt.contourf(cx, cy, grid, np.linspace(0,1,11))
+    plt.xlabel('a2')
+    plt.colorbar()
+    plt.ylabel('t2')
+
+elif plot_type == 2:
+    cx, cy, grid = lmfit.conf_interval2d(mi,'a1','t2',30,30)
+    plt.contourf(cx, cy, grid, np.linspace(0,1,11))
+    plt.xlabel('a1')
+    plt.colorbar()
+    plt.ylabel('t2')
+
+    
+elif plot_type == 3:
+    cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
+    cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
+    plt.scatter(cx1, cy1, c=prob, s=30)
+    plt.scatter(cx2, cy2, c=prob2, s=30)
+    plt.gca().set_xlim((2.5, 3.5))
+    plt.gca().set_ylim((11, 13))
+    plt.xlabel('a1')
+    plt.ylabel('t2')
+
+if plot_type > 0:
+    plt.show()
diff --git a/examples/doc_model1.py b/examples/doc_model1.py
index 6085766..ced7da3 100644
--- a/examples/doc_model1.py
+++ b/examples/doc_model1.py
@@ -16,7 +16,7 @@ def gaussian(x, amp, cen, wid):
 gmod = Model(gaussian)
 result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
 
-print(gmod.fit_report())
+print(result.fit_report())
 
 plt.plot(x, y,         'bo')
 plt.plot(x, result.init_fit, 'k--')
diff --git a/examples/doc_model2.py b/examples/doc_model2.py
index 79635af..c54c12c 100644
--- a/examples/doc_model2.py
+++ b/examples/doc_model2.py
@@ -18,9 +18,16 @@ def line(x, slope, intercept):
     return slope * x + intercept
 
 mod = Model(gaussian) + Model(line)
-result = mod.fit(y, x=x, amp=5, cen=5, wid=1, slope=0, intercept=1)
+pars  = mod.make_params( amp=5, cen=5, wid=1, slope=0, intercept=1)
 
-print(mod.fit_report())
+print mod
+
+for k, v in pars.items():
+    print k, v
+
+result = mod.fit(y, pars, x=x)
+
+print(result.fit_report())
 
 plt.plot(x, y,         'bo')
 plt.plot(x, result.init_fit, 'k--')
diff --git a/examples/doc_model_with_iter_callback.py b/examples/doc_model_with_iter_callback.py
new file mode 100644
index 0000000..1f6bdc6
--- /dev/null
+++ b/examples/doc_model_with_iter_callback.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+#<examples/doc_with_itercb.py>
+
+from numpy import linspace, random
+import matplotlib.pylab as pylab
+
+from lmfit.models import LinearModel, GaussianModel
+from lmfit.lineshapes import gaussian
+
+def per_iteration(pars, iter, resid, *args, **kws):
+    if iter < 3 or iter % 10 == 0:
+        out = ['== %i ' % iter]
+        for key, val in pars.valuesdict().items():
+            out.append('%s=%.3f' % (key, val))
+        print ', '.join(out)
+        print args, kws
+
+
+x = linspace(0., 20, 401)
+y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+y = y  - .20*x + 3.333 + random.normal(scale=0.23,  size=len(x))
+
+mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+pars = mod.make_params()
+pars['peak_amplitude'].value = 3.0
+pars['peak_center'].value = 6.0
+pars['peak_sigma'].value = 2.0
+pars['bkg_intercept'].value = 0.0
+pars['bkg_slope'].value = 0.0
+
+
+out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
+
+pylab.plot(x, y, 'b--')
+
+# print(' Nfev = ', out.nfev)
+print( out.fit_report())
+
+pylab.plot(x, out.best_fit, 'k-')
+pylab.show()
+
+#<end examples/doc_with_itercb.py>
diff --git a/examples/doc_nistgauss.py b/examples/doc_nistgauss.py
index ce9fa49..861c3cf 100644
--- a/examples/doc_nistgauss.py
+++ b/examples/doc_nistgauss.py
@@ -2,7 +2,7 @@
 #<examples/doc_nistgauss.py>
 import numpy as np
 from lmfit.models import GaussianModel, ExponentialModel
-
+import sys
 import matplotlib.pyplot as plt
 
 dat = np.loadtxt('NIST_Gauss2.dat')
@@ -10,27 +10,34 @@ x = dat[:, 1]
 y = dat[:, 0]
 
 exp_mod = ExponentialModel(prefix='exp_')
-exp_mod.guess_starting_values(y, x=x)
+pars = exp_mod.guess(y, x=x)
 
 gauss1  = GaussianModel(prefix='g1_')
+pars.update( gauss1.make_params())
+
+pars['g1_center'].set(105, min=75, max=125)
+pars['g1_sigma'].set(15, min=3)
+pars['g1_amplitude'].set(2000, min=10)
+
 gauss2  = GaussianModel(prefix='g2_')
 
-gauss1.set_paramval('center',    105, min=75, max=125)
-gauss1.set_paramval('sigma',      15, min=3)
-gauss1.set_paramval('amplitude', 2000, min=10)
+pars.update(gauss2.make_params())
 
-gauss2.set_paramval('center',    155, min=125, max=175)
-gauss2.set_paramval('sigma',      15, min=3)
-gauss2.set_paramval('amplitude', 2000, min=10)
+pars['g2_center'].set(155, min=125, max=175)
+pars['g2_sigma'].set(15, min=3)
+pars['g2_amplitude'].set(2000, min=10)
 
 mod = gauss1 + gauss2 + exp_mod
 
-out = mod.fit(y, x=x)
-
-print(mod.fit_report(min_correl=0.5))
 
+init = mod.eval(pars, x=x)
 plt.plot(x, y)
-plt.plot(x, out.init_fit, 'k--')
+plt.plot(x, init, 'k--')
+
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(min_correl=0.5))
+
 plt.plot(x, out.best_fit, 'r-')
 plt.show()
 #<end examples/doc_nistgauss.py>
diff --git a/examples/doc_nistgauss2.py b/examples/doc_nistgauss2.py
index 852224f..8b2ff21 100644
--- a/examples/doc_nistgauss2.py
+++ b/examples/doc_nistgauss2.py
@@ -13,7 +13,6 @@ exp_mod = ExponentialModel(prefix='exp_')
 gauss1  = GaussianModel(prefix='g1_')
 gauss2  = GaussianModel(prefix='g2_')
 
-
 def index_of(arrval, value):
     "return index of array *at or below* value "
     if value < min(arrval):  return 0
@@ -23,15 +22,16 @@ ix1 = index_of(x,  75)
 ix2 = index_of(x, 135)
 ix3 = index_of(x, 175)
 
-exp_mod.guess_starting_values(y[:ix1], x=x[:ix1])
-gauss1.guess_starting_values(y[ix1:ix2], x=x[ix1:ix2])
-gauss2.guess_starting_values(y[ix2:ix3], x=x[ix2:ix3])
+pars1 = exp_mod.guess(y[:ix1], x=x[:ix1])
+pars2 = gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
+pars3 = gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
 
+pars = pars1 + pars2 + pars3
 mod = gauss1 + gauss2 + exp_mod
 
-out = mod.fit(y, x=x)
+out = mod.fit(y, pars, x=x)
 
-print(mod.fit_report(min_correl=0.5))
+print(out.fit_report(min_correl=0.5))
 
 plt.plot(x, y)
 plt.plot(x, out.init_fit, 'k--')
diff --git a/examples/doc_peakmodels.py b/examples/doc_peakmodels.py
new file mode 100644
index 0000000..55a47c3
--- /dev/null
+++ b/examples/doc_peakmodels.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+#<examples/doc_peakmodels.py>
+from numpy import loadtxt
+from lmfit.models import LorentzianModel, GaussianModel, VoigtModel
+import matplotlib.pyplot as plt
+
+data = loadtxt('test_peak.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+MODEL = 'gauss'
+MODEL = 'loren'
+MODEL = 'voigt'
+# gamma_free = False
+gamma_free = True
+
+if MODEL.lower().startswith('g'):
+    mod = GaussianModel()
+elif MODEL.lower().startswith('l'):
+    mod = LorentzianModel()
+elif MODEL.lower().startswith('v'):
+    mod = VoigtModel()
+
+pars = mod.guess(y, x=x)
+
+if gamma_free:
+    pars['gamma'].set(value=0.7, vary=True, expr='')
+
+out = mod.fit(y, pars, x=x)
+print(out.fit_report(min_correl=0.25))
+
+plt.plot(x, y,  'b-')
+plt.plot(x, out.best_fit,  'r-')
+plt.show()
+
+
+#<end examples/doc_peakmodels.py>
diff --git a/examples/doc_stepmodel.py b/examples/doc_stepmodel.py
index d166b08..4d97832 100644
--- a/examples/doc_stepmodel.py
+++ b/examples/doc_stepmodel.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 #<examples/doc_stepmodel.py>
 import numpy as np
-from lmfit.models import StepModel, ConstantModel
+from lmfit.models import StepModel, LinearModel
 
 import matplotlib.pyplot as plt
 
@@ -9,17 +9,18 @@ x = np.linspace(0, 10, 201)
 y = np.ones_like(x)
 y[:48] = 0.0
 y[48:77] = np.arange(77-48)/(77.0-48)
-y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0
+y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0 + 2.22*x
 
-step_mod = StepModel(form='erf')
-offset = ConstantModel()
-step_mod.guess_starting_values(y, x)
-offset.set_paramval('c', y.min())
+step_mod = StepModel(form='erf', prefix='step_')
+line_mod = LinearModel(prefix='line_')
 
-mod = step_mod + offset
-out = mod.fit(y, x=x)
+pars =  line_mod.make_params(intercept=y.min(), slope=0)
+pars += step_mod.guess(y, x=x, center=2.5)
 
-print(mod.fit_report())
+mod = step_mod + line_mod
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report())
 
 plt.plot(x, y)
 plt.plot(x, out.init_fit, 'k--')
diff --git a/examples/doc_withreport.py b/examples/doc_withreport.py
new file mode 100644
index 0000000..6163719
--- /dev/null
+++ b/examples/doc_withreport.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#<examples/doc_withreport.py>
+
+from __future__ import print_function
+from lmfit import Parameters, minimize, fit_report
+from numpy import random, linspace, pi, exp, sin, sign
+
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.46)
+p_true.add('shift', value=0.123)
+p_true.add('decay', value=0.032)
+
+def residual(pars, x, data=None):
+    vals = pars.valuesdict()
+    amp =  vals['amp']
+    per =  vals['period']
+    shift = vals['shift']
+    decay = vals['decay']
+    
+    if abs(shift) > pi/2:
+        shift = shift - sign(shift)*pi
+    model = amp * sin(shift + x/per) * exp(-x*x*decay*decay)
+    if data is None:
+        return model
+    return (model - data)
+
+n = 1001
+xmin = 0.
+xmax = 250.0
+
+random.seed(0)
+
+noise = random.normal(scale=0.7215, size=n)
+x     = linspace(xmin, xmax, n)
+data  = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=13.0)
+fit_params.add('period', value=2)
+fit_params.add('shift', value=0.0)
+fit_params.add('decay', value=0.02)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+fit = residual(fit_params, x)
+print(fit_report(fit_params))
+
+#<end of examples/doc_withreport.py>
diff --git a/examples/example_peakmodel.py b/examples/example_peakmodel.py
deleted file mode 100644
index 686f6a3..0000000
--- a/examples/example_peakmodel.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-Example using the built-in Peak-like models
-"""
-import numpy as np
-from lmfit.old_models1d import GaussianModel, LorentzianModel, VoigtModel
-import matplotlib.pyplot as plt
-
-x  = np.linspace(0, 10, 101)
-
-sca = 1./(2.0*np.sqrt(2*np.pi))
-noise =  5e-2*np.random.randn(len(x))
-dat = 2.60 -0.04*x + 7.5 * np.exp(-(x-4.0)**2 / (2*0.35)**2) + noise
-
-mod = GaussianModel(background='linear')
-# mod = VoigtModel(background='linear')
-# mod = LorentzianModel(background='linear')
-
-mod.guess_starting_values(dat, x)
-
-
-plt.plot(x, dat)
-
-# initial guess
-plt.plot(x, mod.model(x=x) + mod.calc_background(x), 'r+')
-
-mod.fit(dat, x=x)
-
-print mod.fit_report()
-
-# best fit
-plt.plot(x, mod.model(x=x) + mod.calc_background(x))
-plt.show()
-
diff --git a/examples/models_doc1.py b/examples/models_doc1.py
deleted file mode 100644
index 29c0641..0000000
--- a/examples/models_doc1.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-#<examples/models_doc1.py>
-from numpy import loadtxt
-from lmfit import fit_report
-from lmfit.models import GaussianModel, VoigtModel
-import matplotlib.pyplot as plt
-
-
-data = loadtxt('test_peak.dat')
-x = data[:, 0]
-y = data[:, 1]
-
-gmodel = GaussianModel()
-gmodel.guess_starting_values(y, x=x)
-gresult = gmodel.fit(y, x=x)
-
-print 'With Gaussian: '
-print fit_report(gresult.params, min_correl=0.25)
-print 'Chi-square = %.3f, Reduced Chi-square = %.3f' % (gresult.chisqr, gresult.redchi)
-plt.plot(x, y,         'k')
-plt.plot(x, 10*(y-gresult.best_fit), 'r-')
-
-
-vmodel = VoigtModel()
-vmodel.guess_starting_values(y, x=x)
-vresult = vmodel.fit(y, x=x)
-
-print 'With Voigt: '
-print fit_report(vresult.params, min_correl=0.25)
-print 'Chi-square = %.3f, Reduced Chi-square = %.3f' % (vresult.chisqr, vresult.redchi)
-
-plt.plot(x, 10*(y-vresult.best_fit), 'b-')
-
-
-vmodel.params['gamma'].vary = True
-vmodel.params['gamma'].expr = None
-
-vresult2 = vmodel.fit(y, x=x)
-
-print 'With Voigt, varying gamma: '
-print fit_report(vresult2.params, min_correl=0.25)
-print 'Chi-square = %.3f, Reduced Chi-square = %.3f' % (vresult2.chisqr, vresult2.redchi)
-plt.plot(x, 10*(y-vresult2.best_fit), 'g-')
-
-plt.show()
-
-#<end examples/models_doc1.py>
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index fdcf8ce..5329d4c 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -1,35 +1,50 @@
 """
-   LMfit-py provides a Least-Squares Minimization routine and
-   class with a simple, flexible approach to parameterizing a
-   model for fitting to data.  Named Parameters can be held
-   fixed or freely adjusted in the fit, or held between lower
-   and upper bounds.  If the separate asteval module has been
-   installed, parameters can be constrained as a simple
-   mathematical expression of other Parameters.
-
-   version: 0.8.0-rc1
-   last update: 2014-Aug-13
-   License: BSD
-   Author:  Matthew Newville <newville at cars.uchicago.edu>
-            Center for Advanced Radiation Sources,
-            The University of Chicago
+Lmfit provides a high-level interface to non-linear optimization and curve
+fitting problems for Python. Lmfit builds on the Levenberg-Marquardt algorithm of
+scipy.optimize.leastsq(), but also supports most of the optimization methods
+from scipy.optimize.  It has a number of useful enhancements, including:
+
+  * Using Parameter objects instead of plain floats as variables.  A Parameter
+    has a value that can be varied in the fit, fixed, have upper and/or lower
+    bounds.  It can even have a value that is constrained by an algebraic
+    expression of other Parameter values.
+
+  * Ease of changing fitting algorithms.  Once a fitting model is set up, one
+    can change the fitting algorithm without changing the objective function.
+
+  * Improved estimation of confidence intervals.  While
+    scipy.optimize.leastsq() will automatically calculate uncertainties and
+    correlations from the covariance matrix, lmfit also has functions to
+    explicitly explore parameter space to determine confidence levels even for
+    the most difficult cases.
+
+  * Improved curve-fitting with the Model class.  This extends the
+    capabilities of scipy.optimize.curve_fit(), allowing you to turn a function
+    that models your data into a python class that helps you parametrize
+    and fit data with that model.
+
+  * Many pre-built models for common lineshapes are included and ready to use.
+
+   version: 0.8.0
+   last update: 2014-Sep-21
+   License: MIT
+   Authors:  Matthew Newville, The University of Chicago
+             Till Stensitzki, Freie Universitat Berlin
+             Daniel B. Allen, Johns Hopkins University
+             Antonino Ingargiola, University of California, Los Angeles
 """
-__version__ = '0.8.0-rc1'
+__version__ = '0.8.0'
+
 from .minimizer import minimize, Minimizer, MinimizerException
 from .parameter import Parameter, Parameters
 from .confidence import conf_interval, conf_interval2d
 from .printfuncs import (fit_report, ci_report,
                          report_fit, report_ci, report_errors)
 
-from .wrap  import wrap_function, make_paras_and_func
 from .model import Model
 from . import models
 
 from . import uncertainties
 from .uncertainties import ufloat, correlated_values
 
-__xall__ = ['minimize', 'Minimizer', 'Parameter', 'Parameters',
-            'conf_interval', 'conf_interval2d', 'wrap_function',
-            'make_paras_and_func', 'fit_report', 'ci_report',
-            'report_errors', 'report_fit', 'report_ci', 'ufloat',
-            'correlated_values']
+from .ui import Fitter
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 06eb7ae..87004b2 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -43,14 +43,18 @@ def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
     return ((1-fraction)*gaussian(x, amplitude, center, sigma) +
                fraction*lorentzian(x, amplitude, center, sigma))
 
-def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=0.5):
-    """pearson7 lineshape, according to NIST StRD
-    though it seems wikpedia gives a different formula...
-    pearson7(x, center, sigma, expon)
+def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
+    """pearson7 lineshape, using the wikipedia definition:
+
+    pearson7(x, center, sigma, expon) =
+      amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
+
+    where arg = (x-center)/sigma
+    and beta() is the beta function.
     """
-    scale = amplitude * gamfcn(expon) * (sqrt((2**(1/expon) -1)) /
-                                         (gamfcn(expon-0.5)) / (sigma*spi))
-    return scale/(1 + (((1.0*x-center)/sigma)**2) * (2**(1/expon) -1))**expon
+    arg = (x-center)/sigma
+    scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
+    return  scale*(1+arg**2)**(-expon)/sigma
 
 def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
     """Breit-Wigner-Fano lineshape:
@@ -77,6 +81,7 @@ def lognormal(x, amplitude=1.0, center=0., sigma=1):
     lognormal(x, center, sigma)
         = (amplitude/x) * exp(-(ln(x) - center)/ (2* sigma**2))
     """
+    x[where(x<=1.e-19)] = 1.e-19
     return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
 
 def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
@@ -117,6 +122,21 @@ def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
     scale = amplitude/(sigma**gm1)
     return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
 
+def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
+    """Gaussian, skewed with error function, equal to
+
+     gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
+
+    with beta = gamma/(sigma*sqrt(2))
+
+    with  gamma < 0:  tail to low value of centroid
+          gamma > 0:  tail to high value of centroid
+
+    see http://en.wikipedia.org/wiki/Skew_normal_distribution
+    """
+    asym = 1 + erf(gamma*(x-center)/(s2*sigma))
+    return asym * gaussian(x, amplitude, center, sigma)
+
 def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
     """Skewed Voigt lineshape, skewed with error function
     useful for ad-hoc Compton scatter profile
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index f427783..11e0b00 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -99,6 +99,18 @@ def check_ast_errors(error):
             return msg
     return None
 
+SCALAR_METHODS = {'nelder': 'Nelder-Mead',
+                  'powell': 'Powell',
+                  'cg': 'CG',
+                  'bfgs': 'BFGS',
+                  'newton': 'Newton-CG',
+                  'lbfgs': 'L-BFGS-B',
+                  'l-bfgs':'L-BFGS-B',
+                  'tnc': 'TNC',
+                  'cobyla': 'COBYLA',
+                  'slsqp': 'SLSQP',
+                  'dogleg': 'dogleg',
+                  'trust-ncg': 'trust-ncg'}
 
 class Minimizer(object):
     """general minimizer"""
@@ -298,10 +310,6 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             if hasattr(par, 'ast'):
                 delattr(par, 'ast')
 
-    def anneal(self, schedule='cauchy', **kws):
-        """scipy simulated annealing is broken"""
-        raise NotImplementedError("scipy simulated annealing is broken")
-
     def lbfgsb(self, **kws):
         """
         use l-bfgs-b minimization
@@ -549,54 +557,36 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.unprepare_fit()
         return self.success
 
+    def minimize(self, method='leastsq'):
+        """perform minimization using supplied method"""
+        function = self.leastsq
+        kwargs = {}
+
+        user_method = method.lower()
+        if user_method.startswith('least'):
+            function = self.leastsq
+        elif HAS_SCALAR_MIN:
+            function = self.scalar_minimize
+            for key, val in SCALAR_METHODS.items():
+                if (key.lower().startswith(user_method) or
+                    val.lower().startswith(user_method)):
+                    kwargs = dict(method=val)
+        elif (user_method.startswith('nelder') or
+              user_method.startswith('fmin')):
+            function = self.fmin
+        elif user_method.startswith('lbfgsb'):
+            function = self.lbfgsb
+
+        return function(**kwargs)
+
 
 def minimize(fcn, params, method='leastsq', args=None, kws=None,
-             scale_covar=True, engine=None, iter_cb=None, **fit_kws):
+             scale_covar=True, iter_cb=None, **fit_kws):
     """simple minimization function,
     finding the values for the params which give the
     minimal sum-of-squares of the array return by fcn
     """
     fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
                        iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
-
-    _scalar_methods = {'nelder': 'Nelder-Mead',
-                       'powell': 'Powell',
-                       'cg': 'CG',
-                       'bfgs': 'BFGS',
-                       'newton': 'Newton-CG',
-                       'lbfgs': 'L-BFGS-B',
-                       'l-bfgs':'L-BFGS-B',
-                       'tnc': 'TNC',
-                       'cobyla': 'COBYLA',
-                       'slsqp': 'SLSQP',
-                       'dogleg': 'dogleg',
-                       'trust-ncg': 'trust-ncg'}
-
-    _fitmethods = {'nelder': 'fmin',
-                   'lbfgsb': 'lbfgsb',
-                   'leastsq': 'leastsq'}
-
-    if engine is not None:
-        method = engine
-    meth = method.lower()
-
-    fitfunction = None
-    kwargs = {}
-    # default and most common option: use leastsq method.
-    if meth == 'leastsq':
-        fitfunction = fitter.leastsq
-    else:
-        # if scalar_minimize() is supported and method is in list, use it.
-        if HAS_SCALAR_MIN:
-            for name, method in _scalar_methods.items():
-                if meth.startswith(name):
-                    fitfunction = fitter.scalar_minimize
-                    kwargs = dict(method=method)
-        # look for other built-in methods
-        if fitfunction is None:
-            for name, method in _fitmethods.items():
-                if meth.startswith(name):
-                    fitfunction = getattr(fitter, method)
-    if fitfunction is not None:
-        fitfunction(**kwargs)
+    fitter.minimize(method=method)
     return fitter
diff --git a/lmfit/model.py b/lmfit/model.py
index 8960745..b08972e 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -1,12 +1,12 @@
 """
 Concise nonlinear curve fitting.
 """
-
+from __future__ import print_function
 import warnings
 import inspect
-import copy
+from copy import deepcopy
 import numpy as np
-from . import Parameters, Parameter, minimize
+from . import Parameters, Parameter, Minimizer
 from .printfuncs import fit_report
 
 # Use pandas.isnull for aligning missing data is pandas is available.
@@ -42,6 +42,9 @@ class Model(object):
             numpy.isnan is used.
         'raise': Raise a (more helpful) exception when data contains null
             or missing values.
+    name: None or string
+        name for the model. When `None` (default) the name is the same as
+        the model function (`func`).
 
     Note
     ----
@@ -60,29 +63,110 @@ class Model(object):
     _invalid_ivar  = "Invalid independent variable name ('%s') for function %s"
     _invalid_par   = "Invalid parameter name ('%s') for function %s"
     _invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
+    _valid_missing   = (None, 'none', 'drop', 'raise')
+    _names_collide = "Two models have parameters named %s. Use distinct names"
+
+    _invalid_hint = "unknown parameter hint '%s' for param '%s'"
+    _hint_names = ('value', 'vary', 'min', 'max', 'expr')
+
     def __init__(self, func, independent_vars=None, param_names=None,
-                 missing='none', prefix='', components=None, **kws):
+                 missing='none', prefix='', name=None, **kws):
         self.func = func
-        self.prefix = prefix
-        self.param_names = param_names
+        self._prefix = prefix
+        self._param_root_names = param_names  # will not include prefixes
         self.independent_vars = independent_vars
-        self.func_allargs = []
-        self.func_haskeywords = False
-        self.has_initial_guess = False
-        self.components = components
-        if not missing in [None, 'none', 'drop', 'raise']:
+        self.components = []
+        self._func_allargs = []
+        self._func_haskeywords = False
+        if not missing in self._valid_missing:
             raise ValueError(self._invalid_missing)
         self.missing = missing
         self.opts = kws
-        self.result = None
-        self.params = Parameters()
+        self.param_hints = {}
+        self._param_names = set()
         self._parse_params()
-        self._residual = self._build_residual()
         if self.independent_vars is None:
             self.independent_vars = []
+        if name is None and hasattr(self.func, '__name__'):
+            name = self.func.__name__
+        self._name = name
+
+    def _reprstring(self, long=False):
+        if not self.is_composite:
+            # base model
+            opts = []
+            if len(self._prefix) > 0:
+                opts.append("prefix='%s'" % (self._prefix))
+            if long:
+                for k, v in self.opts.items():
+                    opts.append("%s='%s'" % (k, v))
+
+            out = ["%s" % self._name]
+            if len(opts) > 0:
+                out[0] = "%s(%s)" % (out[0], ','.join(opts))
+        else:
+            # composite model
+            if self._name is None:
+                out = [c._reprstring(long)[0] for c in self.components]
+            else:
+                out = [self._name]
+        return out
+
+    @property
+    def name(self):
+        return '+'.join(self._reprstring(long=False))
+
+    @name.setter
+    def name(self, value):
+        self._name = value
+
+    @property
+    def prefix(self):
+        return self._prefix
+
+    @prefix.setter
+    def prefix(self, value):
+        self._prefix = value
+        self._parse_params()
+
+    @property
+    def param_names(self):
+        if self.is_composite:
+            return self._compute_composite_param_names()
+        else:
+            return self._param_names
+
+    def _compute_composite_param_names(self):
+        param_names = set()
+        for sub_model in self.components:
+            param_names |= sub_model.param_names
+        param_names |= self._param_names
+        return param_names
+
+    @property
+    def is_composite(self):
+        return len(self.components) > 0
+
+    def __repr__(self):
+        return  "<lmfit.Model: %s>" % (self.name)
+
+    def copy(self, prefix=None):
+        """Return a completely independent copy of the whole model.
+
+        Parameters
+        ----------
+        prefix: string or None. If not None new model's prefix is
+            changed to the passed value.
+        """
+        new = deepcopy(self)
+        if prefix is not None:
+            new.prefix = prefix
+        return new
 
     def _parse_params(self):
         "build params from function arguments"
+        if self.func is None:
+            return
         argspec = inspect.getargspec(self.func)
         pos_args = argspec.args[:]
         keywords = argspec.keywords
@@ -91,9 +175,9 @@ class Model(object):
             for val in reversed(argspec.defaults):
                 kw_args[pos_args.pop()] = val
         #
-        self.func_haskeywords = keywords is not None
-        self.func_allargs = pos_args + list(kw_args.keys())
-        allargs = self.func_allargs
+        self._func_haskeywords = keywords is not None
+        self._func_allargs = pos_args + list(kw_args.keys())
+        allargs = self._func_allargs
 
         if len(allargs) == 0 and keywords is not None:
             return
@@ -104,17 +188,36 @@ class Model(object):
 
         # default param names: all positional args
         # except independent variables
-        def_vals = {}
-        if self.param_names is None:
-            self.param_names = pos_args[:]
+        self.def_vals = {}
+        might_be_param = []
+        if self._param_root_names is None:
+            self._param_root_names = pos_args[:]
             for key, val in kw_args.items():
                 if (not isinstance(val, bool) and
                     isinstance(val, (float, int))):
-                    self.param_names.append(key)
-                    def_vals[key] = val
+                    self._param_root_names.append(key)
+                    self.def_vals[key] = val
+                elif val is None:
+                    might_be_param.append(key)
             for p in self.independent_vars:
-                if p in self.param_names:
-                    self.param_names.remove(p)
+                if p in self._param_root_names:
+                    self._param_root_names.remove(p)
+
+        new_opts = {}
+        for opt, val in self.opts.items():
+            if (opt in self._param_root_names or opt in might_be_param and
+                isinstance(val, Parameter)):
+                self.set_param_hint(opt, value=val.value,
+                                    min=val.min, max=val.max, expr=val.expr)
+            elif opt in self._func_allargs:
+                new_opts[opt] = val
+        self.opts = new_opts
+
+        names = []
+        if self._prefix is None:
+            self._prefix = ''
+        for pname in self._param_root_names:
+            names.append("%s%s" % (self._prefix, pname))
 
         # check variables names for validity
         # The implicit magic in fit() requires us to disallow some
@@ -122,57 +225,114 @@ class Model(object):
         for arg in self.independent_vars:
             if arg not in allargs or arg in self._forbidden_args:
                 raise ValueError(self._invalid_ivar % (arg, fname))
-        for arg in self.param_names:
-            if arg not in allargs or arg in self._forbidden_args:
+        for arg in names:
+            if (self._strip_prefix(arg) not in allargs or
+                arg in self._forbidden_args):
                 raise ValueError(self._invalid_par % (arg, fname))
 
-        names = []
-        for pname in self.param_names:
-            if not pname.startswith(self.prefix):
-                pname = "%s%s" % (self.prefix, pname)
-            names.append(pname)
-        self.param_names = set(names)
-        for name in self.param_names:
-            self.params.add(name)
-        for key, val in def_vals.items():
-            self.set_paramval(key, val)
-
-    def guess_starting_values(self, data=None, **kws):
-        """stub for guess starting values --
-        should be implemented for each model subclass
+        self._param_names = set(names)
+
+    def set_param_hint(self, name, **kwargs):
+        """set hints for parameter, including optional bounds
+        and constraints  (value, vary, min, max, expr)
+        these will be used by make_params() when building
+        default parameters
+
+        example:
+          model = GaussianModel()
+          model.set_param_hint('amplitude', min=-100.0, max=0.)
         """
+        npref = len(self._prefix)
+        if npref > 0 and name.startswith(self._prefix):
+            name = name[npref:]
+
+        if name not in self.param_hints:
+            self.param_hints[name] = {}
+        hints = self.param_hints[name]
+        for key, val in kwargs.items():
+            if key in self._hint_names:
+                hints[key] = val
+            else:
+                warnings.warn(self._invalid_hint % (key, name))
+
+    def make_params(self, **kwargs):
+        """create and return a Parameters object for a Model.
+        This applies any default values
+        """
+        verbose = False
+        if 'verbose' in kwargs:
+            verbose = kwargs['verbose']
+        params = Parameters()
+        if not self.is_composite:
+            # base model: build Parameters from scratch
+            for name in self.param_names:
+                par = Parameter(name=name)
+                basename = name[len(self._prefix):]
+                # apply defaults from model function definition
+                if basename in self.def_vals:
+                    par.value = self.def_vals[basename]
+                # apply defaults from parameter hints
+                if basename in self.param_hints:
+                    hint = self.param_hints[basename]
+                    for item in self._hint_names:
+                        if item in  hint:
+                            setattr(par, item, hint[item])
+                # apply values passed in through kw args
+                if basename in kwargs:
+                    # kw parameter names with no prefix
+                    par.value = kwargs[basename]
+                if name in kwargs:
+                    # kw parameter names with prefix
+                    par.value = kwargs[name]
+                params[name] = par
+        else:
+            # composite model: merge the sub_models parameters adding hints
+            for sub_model in self.components:
+                comp_params = sub_model.make_params(**kwargs)
+                for par_name, param in comp_params.items():
+                    # apply composite-model hints
+                    if par_name in self.param_hints:
+                        hint = self.param_hints[par_name]
+                        for item in self._hint_names:
+                            if item in  hint:
+                                setattr(param, item, hint[item])
+                params.update(comp_params)
+
+            # apply defaults passed in through kw args
+            for name in self.param_names:
+                if name in kwargs:
+                    params[name].value = kwargs[name]
+
+        # add any additional parameters defined in param_hints
+        # note that composites may define their own additional
+        # convenience parameters here
+        for basename, hint in self.param_hints.items():
+            name = "%s%s" % (self._prefix, basename)
+            if name not in params:
+                par = params[name] = Parameter(name=name)
+                for item in self._hint_names:
+                    if item in  hint:
+                        setattr(par, item, hint[item])
+                # Add the new parameter to the self.param_names
+                self._param_names.add(name)
+                if verbose: print( ' - Adding parameter "%s"' % name)
+        return params
+
+    def guess(self, data=None, **kws):
+        """stub for guess starting values --
+        should be implemented for each model subclass to
+        run self.make_params(), update starting values
+        and return a Parameters object"""
         cname = self.__class__.__name__
-        msg = 'guess_starting_values() not implemented for %s' % cname
+        msg = 'guess() not implemented for %s' % cname
         raise NotImplementedError(msg)
 
-    def _build_residual(self):
-        "Generate and return a residual function."
-        def residual(params, data, weights, **kwargs):
-            "default residual:  (data-model)*weights"
-            diff = self.eval(params=params, **kwargs) - data
-            if weights is not None:
-                diff *= weights
-            return np.asarray(diff)  # for compatibility with pandas.Series
-        return residual
-
-    def make_funcargs(self, params, kwargs):
-        """convert parameter values and keywords to function arguments"""
-        out = {}
-        out.update(self.opts)
-        npref = len(self.prefix)
-        for name, par in params.items():
-            if npref > 0 and name.startswith(self.prefix):
-                name = name[npref:]
-            if name in self.func_allargs or self.func_haskeywords:
-                out[name] = par.value
-        for name, val in kwargs.items():
-            if name in self.func_allargs or self.func_haskeywords:
-                out[name] = val
-                if name in params:
-                    params[name].value = val
-        if self.func_haskeywords and self.components is not None:
-            out['__components__'] = self.components
-        return out
+    def _residual(self, params, data, weights, **kwargs):
+        "default residual:  (data-model)*weights"
+        diff = self.eval(params, **kwargs) - data
+        if weights is not None:
+            diff *= weights
+        return np.asarray(diff)  # for compatibility with pandas.Series
 
     def _handle_missing(self, data):
         "handle missing data"
@@ -186,48 +346,79 @@ class Model(object):
             mask = np.asarray(mask)  # for compatibility with pandas.Series
             return mask
 
-    def set_paramval(self, paramname, value, min=None, max=None, vary=True):
-        """set parameter value, as for initial guess.
-        name can include prefix or not
-        """
-        pname = paramname
-        if pname not in self.params:
-            pname = "%s%s" % (self.prefix, pname)
-        if pname not in self.params:
-            raise KeyError("'%s' not a parameter name" % pname)
-        self.params[pname].value = value
-        self.params[pname].vaary = vary
-        if min is not None:
-            self.params[pname].min = min
-        if max is not None:
-            self.params[pname].max = max
+    def _strip_prefix(self, name):
+        npref = len(self._prefix)
+        if npref > 0 and name.startswith(self._prefix):
+            name = name[npref:]
+        return name
+
+    def make_funcargs(self, params=None, kwargs=None, strip=True):
+        """convert parameter values and keywords to function arguments"""
+        if params is None: params = {}
+        if kwargs is None: kwargs = {}
+        out = {}
+        out.update(self.opts)
+        for name, par in params.items():
+            if strip:
+                name = self._strip_prefix(name)
+            if name in self._func_allargs or self._func_haskeywords:
+                out[name] = par.value
+
+        # kwargs handled slightly differently -- may set param value too!
+        for name, val in kwargs.items():
+            if strip:
+                name = self._strip_prefix(name)
+            if name in self._func_allargs or self._func_haskeywords:
+                out[name] = val
+                if name in params:
+                    params[name].value = val
+        return out
+
+    def _make_all_args(self, params=None, **kwargs):
+        """generate **all** function args for all functions"""
+        args = {}
+        for key, val in self.make_funcargs(params, kwargs).items():
+            args["%s%s" % (self._prefix, key)] = val
+        for sub_model in self.components:
+            otherargs = sub_model._make_all_args(params, **kwargs)
+            args.update(otherargs)
+        return args
 
     def eval(self, params=None, **kwargs):
-        """evaluate the model with the supplied or current parameters"""
-        if params is None:
-            params = self.params
-        fcnargs = self.make_funcargs(params, kwargs)
-        return self.func(**fcnargs)
+        """evaluate the model with the supplied parameters"""
+        if len(self.components) > 0:
+            result = self.components[0].eval(params, **kwargs)
+            for model in self.components[1:]:
+                result += model.eval(params, **kwargs)
+        else:
+            result = self.func(**self.make_funcargs(params, kwargs))
+            # Handle special case of constant result and one
+            # independent variable (of any dimension).
+            if np.ndim(result) == 0 and len(self.independent_vars) == 1:
+                result = np.tile(result, kwargs[self.independent_vars[0]].shape)
+        return result
 
     def fit(self, data, params=None, weights=None, method='leastsq',
-            iter_cb=None, scale_covar=True, **kwargs):
+            iter_cb=None, scale_covar=True, verbose=True, **kwargs):
         """Fit the model to the data.
 
         Parameters
         ----------
         data: array-like
-        params: Parameters object, optional
+        params: Parameters object
         weights: array-like of same size as data
             used for weighted fit
         method: fitting method to use (default = 'leastsq')
         iter_cb:  None or callable  callback function to call at each iteration.
         scale_covar:  bool (default True) whether to auto-scale covariance matrix
+        verbose: bool (default True) print a message when a new parameter is
+            added because of a hint.
         keyword arguments: optional, named like the arguments of the
             model function, will override params. See examples below.
 
         Returns
         -------
-        lmfit.Minimizer
+        lmfit.ModelFit
 
         Examples
         --------
@@ -250,9 +441,9 @@ class Model(object):
 
         """
         if params is None:
-            params = self.params
+            params = self.make_params(verbose=verbose)
         else:
-            params = copy.deepcopy(params)
+            params = deepcopy(params)
 
         # If any kwargs match parameter names, override params.
         param_kwargs = set(kwargs.keys()) & self.param_names
@@ -260,14 +451,11 @@ class Model(object):
             p = kwargs[name]
             if isinstance(p, Parameter):
                 p.name = name  # allows N=Parameter(value=5) with implicit name
-                params[name] = copy.deepcopy(p)
+                params[name] = deepcopy(p)
             else:
-                params[name] = Parameter(name=name, value=p)
+                params[name].set(value=p)
             del kwargs[name]
 
-        # Keep a pristine copy of the initial params.
-        init_params = copy.deepcopy(params)
-
         # All remaining kwargs should correspond to independent variables.
         for name in kwargs.keys():
             if not name in self.independent_vars:
@@ -284,6 +472,14 @@ class Model(object):
             raise ValueError("""Assign each parameter an initial value by
  passing Parameters or keyword arguments to fit""")
 
+        # Do not alter anything that implements the array interface (np.array, pd.Series)
+        # but convert other iterables (e.g., Python lists) to numpy arrays.
+        if not hasattr(data, '__array__'):
+            data = np.asfarray(data)
+        for var in self.independent_vars:
+            var_data = kwargs[var]
+            if (not hasattr(var_data, '__array__')) and (not np.isscalar(var_data)):
+                kwargs[var] = np.asfarray(var_data)
 
         # Handle null/missing values.
         mask = None
@@ -297,76 +493,118 @@ class Model(object):
         # If independent_vars and data are alignable (pandas), align them,
         # and apply the mask from above if there is one.
         for var in self.independent_vars:
-            if not np.isscalar(self.independent_vars):  # just in case
+            if not np.isscalar(kwargs[var]):
                 kwargs[var] = _align(kwargs[var], mask, data)
 
-        kwargs['__components__'] = self.components
-        result = minimize(self._residual, params, args=(data, weights),
-                          method=method, iter_cb=iter_cb,
-                          scale_covar=scale_covar, kws=kwargs)
-
-        # Monkey-patch the Minimizer object with some extra information.
-        result.model = self
-        result.init_params = init_params
-        result.init_values = self.make_funcargs(init_params, {})
-        if '__components__' in result.init_values:
-            result.init_values.pop('__components__')
-        result.init_fit = self.eval(params=init_params, **kwargs)
-        result.best_fit = self.eval(params=result.params, **kwargs)
-        self.result = result
-        return result
-
-    def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1):
-        "return fit report"
-        if self.result is None:
-            raise ValueError("must run .fit() first")
-
-        return fit_report(self.result, modelpars=modelpars,
-                          show_correl=show_correl,
-                          min_correl=min_correl)
+        output = ModelFit(self, params, method=method, iter_cb=iter_cb,
+                          scale_covar=scale_covar, fcn_kws=kwargs)
+        output.fit(data=data, weights=weights)
+        return output
 
     def __add__(self, other):
         colliding_param_names = self.param_names & other.param_names
         if len(colliding_param_names) != 0:
             collision = colliding_param_names.pop()
-            raise NameError("Both models have parameters called " +
-                            "%s. Redefine the models " % collision +
-                            "with distinct names.")
-
-        def composite_func(**kwargs):
-            "composite model function"
-            components = kwargs.get('__components__', None)
-            out = None
-            if components is not None:
-                for comp in components:
-                    pars = Parameters()
-                    prefix = comp.prefix
-                    for p in self.params.values():
-                        if p.name.startswith(prefix):
-                            pars.__setitem__(p.name, p)
-                            pars[p.name].value = kwargs[p.name]
-
-                    fcnargs = comp.make_funcargs(pars, kwargs)
-                    comp_out = comp.func(**fcnargs)
-                    if out is None:
-                        out = np.zeros_like(comp_out)
-                    out += comp_out
-            return out
-
-        components = self.components
-        if components is None:
-            components = [self]
-        if other.components is None:
-            components.append(other)
+            raise NameError(self._names_collide % collision)
+
+        # If the model is already composite just add other as component
+        composite = self
+        if not self.is_composite:
+            # make new composite Model, add self and other as components
+            composite = Model(func=None)
+            composite.components = [self]
+            # we assume that all the sub-models have the same independent vars
+            composite.independent_vars = self.independent_vars[:]
+
+        if other.is_composite:
+            composite.components.extend(other.components)
+            composite.param_hints.update(other.param_hints)
+        else:
+            composite.components.append(other)
+        return composite
+
+
+class ModelFit(Minimizer):
+    """Result from Model fit
+
+    Attributes
+    -----------
+    model         instance of Model -- the model function
+    params        instance of Parameters -- the fit parameters
+    data          array of data values to compare to model
+    weights       array of weights used in fitting
+    init_params   copy of params, before being updated by fit()
+    init_values   array of parameter values, before being updated by fit()
+    init_fit      model evaluated with init_params.
+    best_fit      model evaluated with params after being updated by fit()
+
+    Methods:
+    --------
+    fit(data=None, params=None, weights=None, method=None, **kwargs)
+         fit (or re-fit) model with params to data (with weights)
+         using supplied method.  The keyword arguments are sent
+         as keyword arguments to the model function.
+
+         all inputs are optional, defaulting to the value used in
+         the previous fit.  This allows easily changing data or
+         parameter settings, or both.
+
+    eval(**kwargs)
+         evaluate the current model, with the current parameter values,
+         with values in kwargs sent to the model function.
+
+    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
+         return a fit report.
+
+    """
+    def __init__(self, model, params, data=None, weights=None,
+                 method='leastsq', fcn_args=None, fcn_kws=None,
+                 iter_cb=None, scale_covar=True, **fit_kws):
+        self.model = model
+        self.data = data
+        self.weights = weights
+        self.method = method
+        self.init_params = deepcopy(params)
+        Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args,
+                           fcn_kws=fcn_kws, iter_cb=iter_cb,
+                           scale_covar=scale_covar, **fit_kws)
+
+    def fit(self, data=None, params=None, weights=None, method=None, **kwargs):
+        """perform fit for a Model, given data and params"""
+        if data is not None:
+            self.data = data
+        if params is not None:
+            self.params = params
+        if weights is not None:
+            self.weights = weights
+        if method is not None:
+            self.method = method
+        self.userargs = (self.data, self.weights)
+        self.userkws.update(kwargs)
+        self.init_params = deepcopy(self.params)
+        self.init_values = self.model._make_all_args(self.init_params)
+        self.init_fit    = self.model.eval(params=self.init_params, **self.userkws)
+
+        self.minimize(method=self.method)
+        self.best_fit = self.model.eval(params=self.params, **self.userkws)
+        self.best_values = self.model._make_all_args(self.params)
+
+    def eval(self, **kwargs):
+        self.userkws.update(kwargs)
+        return self.model.eval(params=self.params, **self.userkws)
+
+    def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1):
+        "return fit report"
+        stats_report = fit_report(self, modelpars=modelpars,
+                                 show_correl=show_correl,
+                                 min_correl=min_correl)
+        buff = ['[[Model]]']
+        if len(self.model.components)==0:
+            buff.append('    %s' % self.model._reprstring(long=True)[0])
         else:
-            components.extend(other.components)
-        all_params = self.params
-        for key, val in other.params.items():
-            all_params[key] = val
-
-        out = Model(func=composite_func, independent_vars=self.independent_vars,
-                    param_names=self.param_names | other.param_names,
-                    missing=self.missing, components=components)
-        out.components = components
-        out.params = all_params
+            buff.append(' Composite Model:')
+            for x in self.model._reprstring(long=True):
+                buff.append('    %s' % x)
+        buff = '\n'.join(buff)
+        out = '%s\n%s' % (buff, stats_report)
         return out
diff --git a/lmfit/models.py b/lmfit/models.py
index f2a514a..f2242b2 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -1,12 +1,12 @@
 import numpy as np
 from .model import Model
-from .parameter import Parameter
 
 from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, pearson7,
                          step, rectangle, breit_wigner, logistic,
                          students_t, lognormal, damped_oscillator,
-                         expgaussian, donaich, skewed_voigt, exponential,
-                         powerlaw, linear, parabolic)
+                         expgaussian, skewed_gaussian, donaich,
+                         skewed_voigt, exponential, powerlaw, linear,
+                         parabolic)
 
 class DimensionalError(Exception):
     pass
@@ -23,8 +23,12 @@ def index_of(arr, val):
         return 0
     return np.abs(arr-val).argmin()
 
-def estimate_peak(y, x, negative):
-    "estimate amp, cen, sigma for a peak"
+def fwhm_expr(model):
+    "return constraint expression for fwhm"
+    return "%.7f*%ssigma" % (model.fwhm_factor, model.prefix)
+
+def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
+    "estimate amp, cen, sigma for a peak, create params"
     if x is None:
         return 1.0, 0.0, 1.0
     maxy, miny = max(y), min(y)
@@ -42,7 +46,21 @@ def estimate_peak(y, x, negative):
     if len(halfmax_vals) > 2:
         sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
         cen = x[halfmax_vals].mean()
-    return amp*sig, cen, sig
+    amp = amp*sig*ampscale
+    sig = sig*sigscale
+
+    pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
+    pars['%ssigma' % model.prefix].set(min=0.0)
+    return pars
+
+def update_param_vals(pars, prefix, **kwargs):
+    """convenience function to update parameter values
+    with keyword arguments"""
+    for key, val in kwargs.items():
+        pname = "%s%s" % (prefix, key)
+        if pname in pars:
+            pars[pname].value = val
+    return pars
 
 COMMON_DOC = """
 
@@ -61,41 +79,41 @@ suffix: string to append to paramter names, needed to add two Models that
 """
 class ConstantModel(Model):
     __doc__ = "x -> c" + COMMON_DOC
-    def __init__(self, **kwargs):
-        def func(x, c):
+    def __init__(self, *args, **kwargs):
+        def constant(x, c):
             return c
-        super(ConstantModel, self).__init__(func, **kwargs)
+        super(ConstantModel, self).__init__(constant, *args, **kwargs)
+
+    def guess(self, data, **kwargs):
+        pars = self.make_params()
+        pars['%sc' % self.prefix].set(value=data.mean())
+        return update_param_vals(pars, self.prefix, **kwargs)
 
-    def guess_starting_values(self, data, **kwargs):
-        self.set_paramval('c', data.mean())
-        self.has_initial_guess = True
 
 class LinearModel(Model):
     __doc__ = linear.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(LinearModel, self).__init__(linear, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(LinearModel, self).__init__(linear, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, **kwargs):
+    def guess(self, data, x=None, **kwargs):
         sval, oval = 0., 0.
         if x is not None:
             sval, oval = np.polyfit(x, data, 1)
-        self.set_paramval('intercept', oval)
-        self.set_paramval('sslope' , sval)
-        self.has_initial_guess = True
+        pars = self.make_params(intercept=oval, slope=sval)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class QuadraticModel(Model):
     __doc__ = parabolic.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(QuadraticModel, self).__init__(parabolic, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, **kwargs):
+    def guess(self, data, x=None, **kwargs):
         a, b, c = 0., 0., 0.
         if x is not None:
             a, b, c = np.polyfit(x, data, 2)
-        self.set_paramval('a', a)
-        self.set_paramval('b', b)
-        self.set_paramval('c', c)
-        self.has_initial_guess = True
+        pars = self.make_params(a=a, b=b, c=c)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 ParabolicModel = QuadraticModel
 
@@ -103,7 +121,7 @@ class PolynomialModel(Model):
     __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
     MAX_DEGREE=7
     DEGREE_ERR = "degree must be an integer less than %d."
-    def __init__(self, degree, **kwargs):
+    def __init__(self, degree, *args, **kwargs):
         if not isinstance(degree, int)  or degree > self.MAX_DEGREE:
             raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
 
@@ -112,223 +130,217 @@ class PolynomialModel(Model):
         kwargs['param_names'] = pnames
 
         def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
-            out = np.zeros_like(x)
-            args = dict(c0=c0, c1=c1, c2=c2, c3=c3,
-                        c4=c4, c5=c5, c6=c6, c7=c7)
-            for i in range(self.poly_degree+1):
-                out += x**i * args.get('c%i' % i, 0)
-            return out
-        super(PolynomialModel, self).__init__(polynomial, **kwargs)
-
-    def guess_starting_values(self, data, x=None, **kws):
-        coefs = np.zeros(self.MAX_DEGREE+1)
+            return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
+
+        super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        pars = self.make_params()
         if x is not None:
             out = np.polyfit(x, data, self.poly_degree)
             for i, coef in enumerate(out[::-1]):
-                coefs[i] = coef
-        for i in range(self.poly_degree+1):
-            self.set_paramval('c%i' % (i), coefs[i])
-        self.has_initial_guess = True
+                pars['%sc%i'% (self.prefix, i)].set(value=coef)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class GaussianModel(Model):
     __doc__ = gaussian.__doc__ + COMMON_DOC
     fwhm_factor = 2.354820
-    def __init__(self, **kwargs):
-        super(GaussianModel, self).__init__(gaussian, **kwargs)
-        self.params.add('%sfwhm' % self.prefix,
-                        expr='%.6f*%ssigma' % (self.fwhm_factor, self.prefix))
-
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.set_paramval('amplitude', amp)
-        self.set_paramval('center', cen)
-        self.set_paramval('sigma', sig)
-        self.has_initial_guess = True
+    def __init__(self, *args, **kwargs):
+        super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('fwhm', expr=fwhm_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class LorentzianModel(Model):
     __doc__ = lorentzian.__doc__ + COMMON_DOC
     fwhm_factor = 2.0
-    def __init__(self, **kwargs):
-        super(LorentzianModel, self).__init__(lorentzian, **kwargs)
-        self.params.add('%sfwhm' % self.prefix,
-                        expr='%.7f*%ssigma' % (self.fwhm_factor,
-                                               self.prefix))
-
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.set_paramval('amplitude', amp)
-        self.set_paramval('center', cen)
-        self.set_paramval('sigma', sig)
-        self.has_initial_guess = True
+    def __init__(self, *args, **kwargs):
+        super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('fwhm', expr=fwhm_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class VoigtModel(Model):
     __doc__ = voigt.__doc__ + COMMON_DOC
     fwhm_factor = 3.60131
-    def __init__(self, **kwargs):
-        super(VoigtModel, self).__init__(voigt, **kwargs)
-        self.params.add('%sfwhm' % self.prefix,
-                        expr='%.7f*%ssigma' % (self.fwhm_factor,
-                                               self.prefix))
-
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.set_paramval('amplitude', amp)
-        self.set_paramval('center', cen)
-        self.set_paramval('sigma', sig)
-        self.params['%sgamma' % self.prefix] = \
-                              Parameter(expr = '%ssigma' % self.prefix)
-        self.has_initial_guess = True
+    def __init__(self, *args, **kwargs):
+        super(VoigtModel, self).__init__(voigt, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
+        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative,
+                               ampscale=1.5, sigscale=0.65)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class PseudoVoigtModel(Model):
     __doc__ = pvoigt.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(PseudoVoigtModel, self).__init__(pvoigt, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
+        self.set_param_hint('fraction', value=0.5)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.set_paramval('amplitude', amp)
-        self.set_paramval('center', cen)
-        self.set_paramval('sigma', sig)
-        self.set_paramval('fraction', 0.5)
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+        pars['%sfraction' % self.prefix].set(value=0.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 
 class Pearson7Model(Model):
     __doc__ = pearson7.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(Pearson7Model, self).__init__(pearson7, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
+        self.set_param_hint('expon',  value=1.5)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.set_paramval('amplitude', amp)
-        self.set_paramval('center', cen)
-        self.set_paramval('sigma', sig)
-        self.set_paramval('exponent', 0.5)
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        pars['%sexpon' % self.prefix].set(value=1.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 
 class StudentsTModel(Model):
     __doc__ = students_t.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(StudentsTModel, self).__init__(students_t, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.params['%samplitude' % self.prefix].value = amp
-        self.params['%scenter' % self.prefix].value = cen
-        self.params['%ssigma' % self.prefix].value = sig
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
-class BrietWignerModel(Model):
+
+class BreitWignerModel(Model):
     __doc__ = breit_wigner.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(BreitWignerModel, self).__init__(breit_wigner, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        pars['%sq' % self.prefix].set(value=1.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class LognormalModel(Model):
+    __doc__ = lognormal.__doc__ + COMMON_DOC
+    def __init__(self, *args, **kwargs):
+        super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
+        pars['%ssigma' % self.prefix].set(min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.params['%samplitude' % self.prefix].value = amp
-        self.params['%scenter' % self.prefix].value = cen
-        self.params['%ssigma' % self.prefix].value = sig
-        self.params['%sq' % self.prefix].value = 1.0
-        self.has_initial_guess = True
 
 class DampedOscillatorModel(Model):
     __doc__ = damped_oscillator.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(DampedOscillatorModel, self).__init__(damped_oscillator, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.params['%samplitude' % self.prefix].value = amp
-        self.params['%scenter' % self.prefix].value = cen
-        self.params['%ssigma' % self.prefix].value = sig
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars =guess_from_peak(self, data, x, negative,
+                              ampscale=0.1, sigscale=0.1)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 class ExponentialGaussianModel(Model):
     __doc__ = expgaussian.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(ExponentialGaussianModel, self).__init__(expgaussian, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.params['%samplitude' % self.prefix].value = amp
-        self.params['%scenter' % self.prefix].value = cen
-        self.params['%ssigma' % self.prefix].value = sig
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
+class SkewedGaussianModel(Model):
+    __doc__ = skewed_gaussian.__doc__ + COMMON_DOC
+    fwhm_factor = 2.354820
+    def __init__(self, *args, **kwargs):
+        super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 class DonaichModel(Model):
     __doc__ = donaich.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(DonaichModel, self).__init__(donaich, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(DonaichModel, self).__init__(donaich, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
-        amp, cen, sig = estimate_peak(data, x, negative)
-        self.params['%samplitude' % self.prefix].value = amp
-        self.params['%scenter' % self.prefix].value = cen
-        self.params['%ssigma' % self.prefix].value = sig
-        self.has_initial_guess = True
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
 
 class PowerLawModel(Model):
     __doc__ = powerlaw.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(PowerLawModel, self).__init__(powerlaw, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, **kws):
+    def guess(self, data, x=None, **kwargs):
         try:
-            expon, amp = np.polyfit(log(x+1.e-14), log(data+1.e-14), 1)
+            expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
         except:
             expon, amp = 1, np.log(abs(max(data)+1.e-9))
-        self.params['%samplitude' % self.prefix].value = np.exp(amp)
-        self.params['%sexponent' % self.prefix].value = expon
-        self.has_initial_guess = True
+
+        pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class ExponentialModel(Model):
     __doc__ = exponential.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(ExponentialModel, self).__init__(exponential, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, **kws):
+    def guess(self, data, x=None, **kwargs):
         try:
             sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
         except:
             sval, oval = 1., np.log(abs(max(data)+1.e-9))
-        self.params['%samplitude' % self.prefix].value = np.exp(oval)
-        self.params['%sdecay' % self.prefix].value = -1/sval
-        self.has_initial_guess = True
+        pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class StepModel(Model):
     __doc__ = step.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(StepModel, self).__init__(step, **kwargs)
+    def __init__(self, *args, **kwargs):
+        super(StepModel, self).__init__(step, *args, **kwargs)
 
-    def guess_starting_values(self, data, x=None, **kws):
+    def guess(self, data, x=None, **kwargs):
         if x is None:
             return
         ymin, ymax = min(data), max(data)
         xmin, xmax = min(x), max(x)
-        self.set_paramval('amplitude', (ymax-ymin))
-        self.set_paramval('center',    (xmax+xmin)/2.0)
-        self.set_paramval('sigma',     (xmax-xmin)/7.0)
-        self.has_initial_guess = True
+        pars = self.make_params(amplitude=(ymax-ymin),
+                                center=(xmax+xmin)/2.0)
+        pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
 
 class RectangleModel(Model):
     __doc__ = rectangle.__doc__ + COMMON_DOC
-    def __init__(self, **kwargs):
-        super(RectangleModel, self).__init__(rectangle, **kwargs)
-        self.params.add('%smidpoint' % self.prefix,
-                        expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
-                                                            self.prefix))
-    def guess_starting_values(self, data, x=None, **kws):
+    def __init__(self, *args, **kwargs):
+        super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
+        self.set_param_hint('midpoint',
+                            expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
+                                                                self.prefix))
+    def guess(self, data, x=None, **kwargs):
         if x is None:
             return
         ymin, ymax = min(data), max(data)
         xmin, xmax = min(x), max(x)
-        self.set_paramval('amplitude', (ymax-ymin))
-        self.set_paramval('center1',   (xmax+xmin)/4.0)
-        self.set_paramval('sigma1' ,   (xmax-xmin)/7.0)
-        self.set_paramval('center2', 3*(xmax+xmin)/4.0)
-        self.set_paramval('sigma2',    (xmax-xmin)/7.0)
-        self.has_initial_guess = True
+        pars = self.make_params(amplitude=(ymax-ymin),
+                                center1=(xmax+xmin)/4.0,
+                                center2=3*(xmax+xmin)/4.0)
+        pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
 
diff --git a/lmfit/old_models1d.py b/lmfit/old_models1d.py
deleted file mode 100644
index 31f24cf..0000000
--- a/lmfit/old_models1d.py
+++ /dev/null
@@ -1,400 +0,0 @@
-"""
-Basic Fitting Models for 1-D data usingsimplifying standard line shapes.
-
-All include optional background that can be
-   ('constant', 'linear', 'parabolic')
-
-Models:
-     Linear
-     Quadratic
-     Exponential
-     Gaussian
-     Lorentzian
-     Voigt
-     Step (linear / erf / atan)
-     Rectangular (linear / erf / atan)
-
-  Original concept and code by Tillsten,
-  adopted and expanded by Matt Newville
-"""
-
-import numpy as np
-from scipy.special import gamma, gammaln, beta, betaln, erf, erfc, wofz
-
-from . import Parameter, Parameters, Minimizer
-from . import fit_report as lmfit_report
-
-VALID_BKGS = ('constant', 'linear', 'quadratic')
-
-LOG2 = np.log(2)
-SQRT2   = np.sqrt(2)
-SQRT2PI = np.sqrt(2*np.pi)
-SQRTPI  = np.sqrt(np.pi)
-
-def index_of(arr, val):
-    """return index of array nearest to a value
-    """
-    if val < min(arr):
-        return 0
-    return np.abs(arr-val).argmin()
-
-class FitBackground(object):
-    """base class for fitting models
-    needs to overwrite calculate() method
-    """
-    def __init__(self, **kws):
-        self.params = Parameters()
-        for key, val in kws.items():
-            if val is not None:
-                self.params.add('bkg_%s' % key, value=val, vary=True)
-
-    def calculate(self, x):
-        pass
-
-class PolyBackground(FitBackground):
-    """polynomial background: constant, linear, or quadratic"""
-    def __init__(self, offset=None, slope=None, quad=None):
-        FitBackground.__init__(self, offset=offset, slope=slope, quad=quad)
-
-    def calculate(self, x):
-        bkg = np.zeros_like(x)
-        if 'bkg_offset' in self.params:
-            bkg += self.params['bkg_offset'].value
-        if 'bkg_slope' in self.params:
-            bkg += x*self.params['bkg_slope'].value
-        if 'bkg_quad' in self.params:
-            bkg += x*x*self.params['bkg_quad'].value
-        return bkg
-
-
-class FitModel(object):
-    """base class for fitting models
-
-    only supports polynomial background (offset, slop, quad)
-
-    """
-    invalid_bkg_msg = """Warning: unrecoginzed background option '%s'
-expected one of the following:
-   %s
-"""
-    def __init__(self, background=None, **kws):
-        self.params = Parameters()
-        self.has_initial_guess = False
-        self.bkg = None
-        self.initialize_background(background=background, **kws)
-
-    def initialize_background(self, background=None,
-                              offset=0, slope=0, quad=0):
-        """initialize background parameters"""
-        if background is None:
-            return
-        if background not in VALID_BKGS:
-            print( self.invalid_bkg_msg % (repr(background),
-                                          ', '.join(VALID_BKGS)))
-
-        kwargs = {'offset':offset}
-        if background.startswith('line'):
-            kwargs['slope'] = slope
-        if background.startswith('quad'):
-            kwargs['quad'] = quad
-
-        self.bkg = PolyBackground(**kwargs)
-
-        for nam, par in self.bkg.params.items():
-            self.params[nam] = par
-
-    def calc_background(self, x):
-        if self.bkg is None:
-            return 0
-        return self.bkg.calculate(x)
-
-    def __objective(self, params, y=None, x=None, dy=None, **kws):
-        """fit objective function"""
-        bkg = 0
-        if x is not None: bkg = self.calc_background(x)
-        if y is None:     y   = 0.0
-        if dy is None:    dy  = 1.0
-        model = self.model(self.params, x=x, dy=dy, **kws)
-        return (model + bkg - y)/dy
-
-    def model(self, params, x=None, **kws):
-        raise NotImplementedError
-
-    def guess_starting_values(self, params, y, x=None, **kws):
-        raise NotImplementedError
-
-    def fit_report(self, params=None, **kws):
-        if params is None:
-            params = self.params
-        return lmfit_report(params, **kws)
-
-    def fit(self, y, x=None, dy=None, **kws):
-        fcn_kws = {'y': y, 'x': x, 'dy': dy}
-        fcn_kws.update(kws)
-        if not self.has_initial_guess:
-            self.guess_starting_values(y, x=x, **kws)
-        self.minimizer = Minimizer(self.__objective, self.params,
-                                   fcn_kws=fcn_kws, scale_covar=True)
-        self.minimizer.prepare_fit()
-        self.init = self.model(self.params, x=x, **kws)
-        self.minimizer.leastsq()
-
-class LinearModel(FitModel):
-    """Linear Model: slope, offset, no background"""
-    def __init__(self, offset=0, slope=0, **kws):
-        FitModel.__init__(self, background=None, **kws)
-        self.params.add('offset', value=offset)
-        self.params.add('slope',  value=slope)
-
-    def guess_starting_values(self, y, x=None, **kws):
-        if x is None:
-            sval, oval = 0., 0.
-        else:
-            sval, oval = np.polyfit(x, y, 1)
-        self.params['offset'].value = oval
-        self.params['slope'].value = sval
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        return params['offset'].value +  x * params['slope'].value
-
-class QuadraticModel(FitModel):
-    """Quadratic Model: slope, offset, quad, no background"""
-    def __init__(self, offset=0, slope=0, quad=0, **kws):
-        FitModel.__init__(self, background=None, **kws)
-        self.params.add('offset', value=offset)
-        self.params.add('slope',  value=slope)
-        self.params.add('quad',  value=quad)
-
-    def guess_starting_values(self, y, x=None, **kws):
-        if x is None:
-            qval, sval, oval = 0., 0., 0.
-        else:
-            qval, sval, oval = np.polyfit(x, y, 2)
-        self.params['offset'].value = oval
-        self.params['slope'].value = sval
-        self.params['quad'].value = qval
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        return params['offset'].value +  x * (params['slope'].value +
-                                              x * params['quad'].value)
-
-class ExponentialModel(FitModel):
-    """Exponential Model: amplitude, decay, optional background"""
-    def __init__(self, amplitude=1, decay=1, background=None, **kws):
-        FitModel.__init__(self, background=background, **kws)
-        self.params.add('amplitude', value=amplitude)
-        self.params.add('decay',  value=decay)
-
-    def guess_starting_values(self, y, x=None, **kws):
-        try:
-            sval, oval = np.polyfit(x, np.log(abs(y)), 1)
-        except:
-            sval, oval = 1., np.log(abs(max(y)+1.e-9))
-        self.params['amplitude'].value = np.exp(oval)
-        self.params['decay'].value = (max(x)-min(x))/10.
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        amp   = params['amplitude'].value
-        decay = params['decay'].value
-        return amp*np.exp(-x / decay)
-
-class PeakModel(FitModel):
-    """Generalization for Gaussian/Lorentzian/Voigt Model:
-       amplitude, center, sigma, optional background
-       sets bounds: sigma >= 0
-       """
-    fwhm_factor = 2.0
-    def __init__(self, amplitude=1, center=0, sigma=1,
-                 background=None, **kws):
-        FitModel.__init__(self, background=background, **kws)
-        self.params.add('amplitude', value=amplitude)
-        self.params.add('center',  value=center)
-        self.params.add('sigma',  value=sigma, min=0)
-        self.params.add('fwhm',  expr='%.6f*sigma' % self.fwhm_factor)
-
-    def guess_starting_values(self, y, x=None, negative=False, **kws):
-        """could probably improve this"""
-        if x is None:
-            return
-        maxy, miny = max(y), min(y)
-        extremey = maxy
-        self.params['amplitude'].value =(maxy - miny)*1.5
-        if negative:
-            extremey = miny
-            self.params['amplitude'].value = -(maxy - miny)*1.5
-        imaxy = index_of(y, extremey)
-        sigma_guess = (max(x)-min(x))/6.0
-        halfmax_vals = np.where(y > extremey/2.0)[0]
-        if len(halfmax_vals) > 3:
-            sigma_guess = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/self.fwhm_factor
-
-        self.params['center'].value = x[imaxy]
-        self.params['sigma'].value = sigma_guess
-        if 'bkg_offset' in self.params:
-            bkg_off = miny
-            if negative:  bkg_off = maxy
-            self.params['bkg_offset'].value = bkg_off
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        pass
-
-class GaussianModel(PeakModel):
-    """Gaussian Model:
-    amplitude, center, sigma, optional background"""
-    fwhm_factor = 2.354820
-    def __init__(self, amplitude=1, center=0, sigma=1,
-                 background=None, **kws):
-        PeakModel.__init__(self, amplitude=1, center=0, sigma=1,
-                           background=background, **kws)
-        self.params.add('fwhm',  expr='%g*sigma' % self.fwhm_factor)
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        amp = params['amplitude'].value
-        cen = params['center'].value
-        sig = params['sigma'].value
-        amp = amp/(SQRT2PI*sig)
-        return amp * np.exp(-(x-cen)**2 / (2*sig**2))
-
-class LorentzianModel(PeakModel):
-    """Lorentzian Model:
-    amplitude, center, sigma, optional background"""
-    fwhm_factor = 2.0
-    def __init__(self, amplitude=1, center=0, sigma=1,
-                 background=None, **kws):
-        PeakModel.__init__(self, amplitude=1, center=0, sigma=1,
-                           background=background, **kws)
-        self.params.add('fwhm',  expr='%.6f*sigma' % self.fwhm_factor)
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        amp = params['amplitude'].value
-        cen = params['center'].value
-        sig = params['sigma'].value
-        return (amp/(1 + ((x-cen)/sig)**2))/(np.pi*sig)
-
-class VoigtModel(PeakModel):
-    """Voigt Model:
-    amplitude, center, sigma, optional background
-    this version sets gamma=sigma
-    """
-    fwhm_factor = 3.60131
-    def __init__(self, amplitude=1, center=0, sigma=1,
-                 background=None, **kws):
-        PeakModel.__init__(self, amplitude=1, center=0, sigma=1,
-                           background=background, **kws)
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        amp = params['amplitude'].value
-        cen = params['center'].value
-        sig = params['sigma'].value
-        z = (x-cen + 1j*sig) / (sig*SQRT2)
-        return amp*wofz(z).real / (sig*SQRT2PI)
-
-class StepModel(FitModel):
-    """Step Model: height, center, width, optional background
-    a step can have a form of 'linear' (default), 'atan', or 'erfc'
-    which will give the functional form for going from 0 to height
-   """
-    def __init__(self, height=1, center=0, width=1, form='linear',
-                 background=None, **kws):
-        FitModel.__init__(self, background=background, **kws)
-        self.params.add('height', value=height)
-        self.params.add('center',  value=center)
-        self.params.add('width',  value=width, min=0)
-        self.form = form
-
-    def guess_starting_values(self, y, x=None, **kws):
-        if x is None:
-            return
-        ymin, ymax = min(y), max(y)
-        xmin, xmax = min(x), max(x)
-        self.params['height'].value = (ymax-ymin)
-        self.params['center'].value = (xmax+xmin)/2.0
-        self.params['width'].value  = (xmax-xmin)/7.0
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        height = params['height'].value
-        center = params['center'].value
-        width  = params['width'].value
-        out = (x - center)/max(width, 1.e-13)
-        if self.form == 'linear':
-            out[np.where(out<0)] = 0.0
-            out[np.where(out>1)] = 1.0
-        elif self.form == 'atan':
-            out = 0.5 + np.arctan(out)/np.pi
-        elif self.form == 'erf':
-            out = 0.5*(1 + erf(out))
-        return height*out
-
-class RectangularModel(FitModel):
-    """Rectangular Model:  a step up and a step down:
-
-    height, center1, center2, width1, width2, optional background
-
-    a step can have a form of 'linear' (default), 'atan', or 'erfc'
-    which will give the functional form for going from 0 to height
-   """
-    def __init__(self, height=1, center1=0, width1=1,
-                 center2=1, width2=1,
-                 form='linear',
-                 background=None, **kws):
-        FitModel.__init__(self, background=background, **kws)
-        self.params.add('height',   value=height)
-        self.params.add('center1',  value=center1)
-        self.params.add('width1',   value=width1, min=0)
-        self.params.add('center2',  value=center2)
-        self.params.add('width2',   value=width2, min=0)
-        self.params.add('midpoint',   expr='(center1+center2)/2.0')
-        self.form = form
-
-    def guess_starting_values(self, y, x=None, **kws):
-        if x is None:
-            return
-        ymin, ymax = min(y), max(y)
-        xmin, xmax = min(x), max(x)
-        self.params['height'].value = (ymax-ymin)
-        self.params['center1'].value = (xmax+xmin)/4.0
-        self.params['width1'].value  = (xmax-xmin)/7.0
-        self.params['center2'].value = 3*(xmax+xmin)/4.0
-        self.params['width2'].value  = (xmax-xmin)/7.0
-        self.has_initial_guess = True
-
-    def model(self, params=None, x=None, **kws):
-        if params is None:
-            params = self.params
-        height  = params['height'].value
-        center1 = params['center1'].value
-        width1  = params['width1'].value
-        center2 = params['center2'].value
-        width2  = params['width2'].value
-        arg1 = (x - center1)/max(width1, 1.e-13)
-        arg2 = (center2 - x)/max(width2, 1.e-13)
-        if self.step == 'atan':
-            out = (np.arctan(arg1) + np.arctan(arg2))/np.pi
-        elif self.step == 'erf':
-            out = 0.5*(erf(arg1) + erf(arg2))
-        else: # 'linear'
-            arg1[np.where(arg1 < 0)] = 0.0
-            arg1[np.where(arg1 > 1)] = 1.0
-            arg2[np.where(arg2 < -1)] = -1.0
-            arg2[np.where(arg2 > 0)] = 0.0
-            out = arg1 + arg2
-        return height*out
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 6596ff4..299818a 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -36,6 +36,13 @@ class Parameters(OrderedDict):
         OrderedDict.__setitem__(self, key, value)
         value.name = key
 
+    def __add__(self, other):
+        "add Parameters objects"
+        if not isinstance(other, Parameters):
+            raise ValueError("'%s' is not a Parameters object" % other)
+        self.update(other)
+        return self
+
     def add(self, name, value=None, vary=True, min=None, max=None, expr=None):
         """convenience function for adding a Parameter:
         with   p = Parameters()
@@ -62,6 +69,14 @@ class Parameters(OrderedDict):
         for para in parlist:
             self.add(*para)
 
+    def valuesdict(self):
+        """return on ordered dictionary of name:value pairs for each Parameter.
+        This is distinct from the Parameters itself, as it has values of
+        the Parameeter values, not the full Parameter object """
+
+        return OrderedDict(((p.name, p.value) for p in self.values()))
+
+
 class Parameter(object):
     """A Parameter is the basic Parameter going
     into Fit Model.  The Parameter holds many attributes:
@@ -77,13 +92,26 @@ class Parameter(object):
         self.min = min
         self.max = max
         self.vary = vary
-        self.expr = expr
+        self._expr = expr
         self.deps   = None
         self.stderr = None
         self.correl = None
         self.from_internal = lambda val: val
         self._init_bounds()
 
+    def set(self, value=None, vary=None, min=None, max=None, expr=None):
+        "set value, vary, min, max, expr with keyword args"
+        if value is not None:
+            self._val = value
+        if vary is not None:
+            self.vary = vary
+        if min is not None:
+            self.min = min
+        if max is not None:
+            self.max = max
+        if expr is not None:
+            self.expr = expr
+
     def _init_bounds(self):
         """make sure initial bounds are self-consistent"""
         #_val is None means - infinity.
@@ -94,6 +122,8 @@ class Parameter(object):
                 self._val = self.min
         elif self.min is not None:
             self._val = self.min
+        elif self.max is not None:
+            self._val = self.max
         self.setup_bounds()
 
     def __getstate__(self):
@@ -202,6 +232,19 @@ class Parameter(object):
     def value(self, val):
         "set value"
         self._val = val
+
+    @property
+    def expr(self):
+        "get expression"
+        return self._expr
+
+    @expr.setter
+    def expr(self, val):
+        "set expr"
+        if val == '':
+            val = None
+        self._expr = val
+
     def __str__(self):
         "string"
         return self.__repr__()
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index f74c25c..e6ef3f4 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -32,6 +32,33 @@ def getfloat_attr(obj, attr, fmt='%.3f'):
     else:
         return repr(val)
 
+def gformat(val, length=11):
+    """format a number with '%g'-like format, except that
+    the return will be length ``length`` (default=11)
+    and have at least length-6 significant digits
+    """
+    length = max(length, 7)
+    fmt = '{: .%ig}' % (length-6)
+    if isinstance(val, int):
+        out = ('{: .%ig}' % (length-2)).format(val)
+        if len(out) > length:
+            out = fmt.format(val)
+    else:
+        out = fmt.format(val)
+    if len(out) < length:
+        if 'e' in out:
+            ie = out.find('e')
+            if '.' not in out[:ie]:
+                out = out[:ie] + '.' + out[ie:]
+            out = out.replace('e', '0'*(length-len(out))+'e')
+        else:
+            fmt = '{: .%ig}' % (length-1)
+            out = fmt.format(val)[:length]
+            if len(out) < length:
+                pad = '0' if '.' in  out else ' '
+                out += pad*(length-len(out))
+    return out
+
 CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
 
 def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1):
@@ -68,30 +95,31 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1):
     add("[[Variables]]")
     for name in parnames:
         par = params[name]
-        space = ' '*(namelen+2 - len(name))
-        nout = "%s: %s" % (name, space)
-        inval = 'inital = ?'
+        space = ' '*(namelen+1-len(name))
+        nout = "%s:%s" % (name, space)
+        inval = '(init= ?)'
         if par.init_value is not None:
-            inval = 'initial = % .7g' % par.init_value
+            inval = '(init=% .7g)' % par.init_value
         if modelpars is not None and name in modelpars:
             inval = '%s, model_value =% .7g' % (inval, modelpars[name].value)
-
         try:
-            sval = '% .7g' % par.value
+            sval = gformat(par.value)
         except (TypeError, ValueError):
             sval = 'Non Numeric Value?'
 
         if par.stderr is not None:
-            sval = '% .7g +/- %.7g' % (par.value, par.stderr)
+            serr = gformat(par.stderr, length=9)
+
             try:
-                sval = '%s (%.2f%%)' % (sval, abs(par.stderr/par.value)*100)
+                spercent = '({:.2%})'.format(abs(par.stderr/par.value))
             except ZeroDivisionError:
-                pass
+                spercent = ''
+            sval = '%s +/-%s %s' % (sval, serr, spercent)
 
         if par.vary:
             add("    %s %s %s" % (nout, sval, inval))
         elif par.expr is not None:
-            add("    %s %s == '%s'" % (nout, sval, par.expr))
+            add("    %s %s  == '%s'" % (nout, sval, par.expr))
         else:
             add("    %s % .7g (fixed)" % (nout, par.value))
 
diff --git a/lmfit/ui/__init__.py b/lmfit/ui/__init__.py
new file mode 100644
index 0000000..3091fa9
--- /dev/null
+++ b/lmfit/ui/__init__.py
@@ -0,0 +1,42 @@
+# These variables are used at the end of the module to decide
+# which BaseFitter subclass the Fitter will point to.
+import warnings
+
+has_ipython, has_matplotlib = False, False
+
+try:
+    import matplotlib
+except ImportError:
+    pass
+else:
+    has_matplotlib = True
+
+try:
+    import IPython
+except ImportError:
+    pass
+else:
+    _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
+    _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
+    try:
+        if IPython.release.version_info[0] < 2:
+            warnings.warn(_ipy_msg1)
+        else:
+            # has_ipython = iPython installed and we are in an IPython session.
+            has_ipython = IPython.get_ipython() is not None
+    except Exception as e:
+        warnings.warn(_ipy_msg2)
+
+from .basefitter import BaseFitter
+Fitter = BaseFitter
+if has_matplotlib:
+    from .basefitter import MPLFitter
+    BaseFitter = BaseFitter
+    Fitter = MPLFitter
+
+if has_ipython:
+    from .ipy_fitter import NotebookFitter
+    BaseFitter = BaseFitter
+    MPLFitter = MPLFitter
+    Fitter = NotebookFitter
+
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
new file mode 100644
index 0000000..ae51dea
--- /dev/null
+++ b/lmfit/ui/basefitter.py
@@ -0,0 +1,320 @@
+import warnings
+import numpy as np
+
+from ..model import Model
+from ..models import ExponentialModel  # arbitrary default
+from ..asteval import Interpreter
+from ..astutils import NameFinder
+from ..minimizer import check_ast_errors
+
+
+_COMMON_DOC = """
+    This is an interactive container for fitting models to particular data.
+
+    It maintains the attributes `current_params` and `current_result`. When
+    its fit() method is called, the best fit becomes the new `current_params`.
+    The most basic usage is iteratively fitting data, taking advantage of
+    this stateful memory that keeps the parameters between each fit.
+"""
+
+_COMMON_EXAMPLES_DOC = """
+
+    Examples
+    --------
+    >>> fitter = Fitter(data, model=SomeModel, x=x)
+
+    >>> fitter.model
+    # This property can be changed, to try different models on the same
+    # data with the same independent vars.
+    # (This is especially handy in the notebook.)
+
+    >>> fitter.current_params
+    # This copy of the model's Parameters is updated after each fit.
+
+    >>> fitter.fit()
+    # Perform a fit using fitter.current_params as a guess.
+    # Optionally, pass a params argument or individual keyword arguments
+    # to override current_params.
+
+    >>> fitter.current_result
+    # This is the result of the latest fit. It contains the usual
+    # copies of the Parameters, in the attributes params and init_params.
+
+    >>> fitter.data = new_data
+    # If this property is updated, the `current_params` are retained and used
+    # as an initial guess if fit() is called again.
+    """
+
+
+class BaseFitter(object):
+    __doc__ = _COMMON_DOC + """
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use; may be set or changed later
+    """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, **kwargs):
+        self._data = data
+        self.kwargs = kwargs
+
+        # GUI-based subclasses need a default value for the menu of models,
+        # and so an arbitrary default is applied here, for uniformity
+        # among the subclasses.
+        if model is None:
+            model = ExponentialModel
+        self.model = model
+
+    def _on_model_value_change(self, name, value):
+        self.model = value
+
+    def _on_fit_button_click(self, b):
+        self.fit()
+
+    def _on_guess_button_click(self, b):
+        self.guess()
+
+    @property
+    def data(self):
+        return self._data
+
+    @data.setter
+    def data(self, value):
+        self._data = value
+
+    @property
+    def model(self):
+        return self._model
+
+    @model.setter
+    def model(self, value):
+        if callable(value):
+            model = value()
+        else:
+            model = value
+        self._model = model
+        self.current_result = None
+        self._current_params = model.make_params()
+
+        # Use these to evaluate any Parameters that use expressions.
+        self.asteval = Interpreter()
+        self.namefinder = NameFinder()
+
+        self._finalize_model(value)
+
+        self.guess()
+
+    def _finalize_model(self, value):
+        # subclasses optionally override to update display here
+        pass
+
+    @property
+    def current_params(self):
+        """Each time fit() is called, these will be updated to reflect
+        the latest best params. They will be used as the initial guess
+        for the next fit, unless overridden by arguments to fit()."""
+        return self._current_params
+
+    @current_params.setter
+    def current_params(self, new_params):
+        # Copy contents, but retain original params objects.
+        for name, par in new_params.items():
+            self._current_params[name].value = par.value
+            self._current_params[name].expr = par.expr
+            self._current_params[name].vary = par.vary
+            self._current_params[name].min = par.min
+            self._current_params[name].max = par.max
+
+        # Compute values for expression-based Parameters.
+        self.__assign_deps(self._current_params)
+        for _, par in self._current_params.items():
+            if par.value is None:
+                self.__update_paramval(self._current_params, par.name)
+
+        self._finalize_params()
+
+    def _finalize_params(self):
+        # subclasses can override this to pass params to display
+        pass
+
+    def guess(self):
+        count_indep_vars = len(self.model.independent_vars)
+        guessing_successful = True
+        try:
+            if count_indep_vars == 0:
+                guess = self.model.guess(self._data)
+            elif count_indep_vars == 1:
+                key = self.model.independent_vars[0]
+                val = self.kwargs[key]
+                d = {key: val}
+                guess = self.model.guess(self._data, **d)
+        except NotImplementedError:
+            guessing_successful = False
+        self.current_params = guess
+        return guessing_successful
+
+    def __assign_deps(self, params):
+        # N.B. This does not use self.current_params but rather
+        # new Parameters that are being built by self.guess().
+        for name, par in params.items():
+            if par.expr is not None:
+                par.ast = self.asteval.parse(par.expr)
+                check_ast_errors(self.asteval.error)
+                par.deps = []
+                self.namefinder.names = []
+                self.namefinder.generic_visit(par.ast)
+                for symname in self.namefinder.names:
+                    if (symname in self.current_params and
+                        symname not in par.deps):
+                        par.deps.append(symname)
+                self.asteval.symtable[name] = par.value
+                if par.name is None:
+                    par.name = name
+
+    def __update_paramval(self, params, name):
+        # N.B. This does not use self.current_params but rather
+        # new Parameters that are being built by self.guess().
+        par = params[name]
+        if getattr(par, 'expr', None) is not None:
+            if getattr(par, 'ast', None) is None:
+                par.ast = self.asteval.parse(par.expr)
+            if par.deps is not None:
+                for dep in par.deps:
+                    self.__update_paramval(params, dep)
+            par.value = self.asteval.run(par.ast)
+            out = check_ast_errors(self.asteval.error)
+            if out is not None:
+                self.asteval.raise_exception(None)
+        self.asteval.symtable[name] = par.value
+
+    def fit(self, *args, **kwargs):
+        "Use current_params unless overridden by arguments passed here."
+        guess = dict(self.current_params)
+        guess.update(self.kwargs)  # from __init__, e.g. x=x
+        guess.update(kwargs)
+        self.current_result = self.model.fit(self._data, *args, **guess)
+        self.current_params = self.current_result.params
+
+
+class MPLFitter(BaseFitter):
+    # This is a small elaboration on BaseModel; it adds a plot()
+    # method that depends on matplotlib. It adds several plot-
+    # styling arguments to the signature.
+    __doc__ = _COMMON_DOC + """
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use; may be set or changed later
+
+    Additional Parameters
+    ---------------------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    **kwargs : independent variables or extra arguments, passed like `x=x`
+        """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, axes_style={},
+                data_style={}, init_style={}, best_style={}, **kwargs):
+        self.axes_style = axes_style
+        self.data_style = data_style
+        self.init_style = init_style
+        self.best_style = best_style
+        super(MPLFitter, self).__init__(data, model, **kwargs)
+
+    def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
+             ax=None):
+        """Plot data, initial guess fit, and best fit.
+
+    Optional style arguments pass keyword dictionaries through to their
+    respective components of the matplotlib plot.
+
+    Precedence is:
+    1. arguments passed to this function, plot()
+    2. arguments passed to the Fitter when it was first declared
+    3. hard-coded defaults
+
+    Parameters
+    ---------------------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    ax : matplotlib.Axes
+            optional `Axes` object. Axes will be generated if not provided.
+        """
+        try:
+            import matplotlib.pyplot as plt
+        except ImportError:
+            raise ImportError("Matplotlib is required to use this Fitter. "
+                              "Use BaseFitter or a subclass thereof "
+                              "that does not depend on matplotlib.")
+
+        # Configure style
+        _axes_style= dict()  # none, but this is here for possible future use
+        _axes_style.update(self.axes_style)
+        _axes_style.update(axes_style)
+        _data_style= dict(color='blue', marker='o', linestyle='none')
+        _data_style.update(**_normalize_kwargs(self.data_style, 'line2d'))
+        _data_style.update(**_normalize_kwargs(data_style, 'line2d'))
+        _init_style = dict(color='gray')
+        _init_style.update(**_normalize_kwargs(self.init_style, 'line2d'))
+        _init_style.update(**_normalize_kwargs(init_style, 'line2d'))
+        _best_style= dict(color='red')
+        _best_style.update(**_normalize_kwargs(self.best_style, 'line2d'))
+        _best_style.update(**_normalize_kwargs(best_style, 'line2d'))
+
+        if ax is None:
+            fig, ax = plt.subplots()
+        count_indep_vars = len(self.model.independent_vars)
+        if count_indep_vars == 0:
+            ax.plot(self._data, **_data_style)
+        elif count_indep_vars == 1:
+            indep_var = self.kwargs[self.model.independent_vars[0]]
+            ax.plot(indep_var, self._data, **_data_style)
+        else:
+            raise NotImplementedError("Cannot plot models with more than one "
+                                      "indepedent variable.")
+        result = self.current_result  # alias for brevity
+        if not result:
+            ax.set(**_axes_style)
+            return  # short-circuit the rest of the plotting
+        if count_indep_vars == 0:
+            ax.plot(result.init_fit, **_init_style)
+            ax.plot(result.best_fit, **_best_style)
+        elif count_indep_vars == 1:
+            ax.plot(indep_var, result.init_fit, **_init_style)
+            ax.plot(indep_var, result.best_fit, **_best_style)
+        ax.set(**_axes_style)
+
+
+def _normalize_kwargs(kwargs, kind='patch'):
+    """Convert matplotlib keywords from short to long form."""
+    # Source:
+    # github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
+    if kind == 'line2d':
+        long_names = dict(c='color', ls='linestyle', lw='linewidth',
+                          mec='markeredgecolor', mew='markeredgewidth',
+                          mfc='markerfacecolor', ms='markersize',)
+    elif kind == 'patch':
+        long_names = dict(c='color', ls='linestyle', lw='linewidth',
+                          ec='edgecolor', fc='facecolor',)
+    for short_name in long_names:
+        if short_name in kwargs:
+            kwargs[long_names[short_name]] = kwargs.pop(short_name)
+    return kwargs
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
new file mode 100644
index 0000000..fb94dd2
--- /dev/null
+++ b/lmfit/ui/ipy_fitter.py
@@ -0,0 +1,262 @@
+import warnings
+import numpy as np
+
+from ..model import Model
+
+from .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC
+
+# Note: If IPython is not available or the version is < 2,
+# this module will not be imported, and a different Fitter will be used.
+
+import IPython
+from IPython.display import display, clear_output
+# Widgets were only experimental in IPython 2.x, but this does work there.
+# Handle the change in naming from 2.x to 3.x.
+if IPython.release.version_info[0] == 2:
+    from IPython.html.widgets import DropdownWidget as Dropdown
+    from IPython.html.widgets import ButtonWidget as Button
+    from IPython.html.widgets import ContainerWidget as Box
+    from IPython.html.widgets import FloatTextWidget as FloatText
+    from IPython.html.widgets import CheckboxWidget as Checkbox
+else:
+    # as of IPython 3.x:
+    from IPython.html.widgets import Dropdown
+    from IPython.html.widgets import Button
+    from IPython.html.widgets import Box
+    from IPython.html.widgets import FloatText
+    from IPython.html.widgets import Checkbox
+
+
+class ParameterWidgetGroup(object):
+    """Construct several widgets that together represent a Parameter.
+
+    This will only be used if IPython is available."""
+    def __init__(self, par):
+        self.par = par
+
+        # Define widgets.
+        self.value_text = FloatText(description=par.name,
+                                    min=self.par.min, max=self.par.max)
+        self.min_text = FloatText(description='min', max=self.par.max)
+        self.max_text = FloatText(description='max', min=self.par.min)
+        self.min_checkbox = Checkbox(description='min')
+        self.max_checkbox = Checkbox(description='max')
+        self.vary_checkbox = Checkbox(description='vary')
+
+        # Set widget values and visibility.
+        if par.value is not None:
+            self.value_text.value = self.par.value
+        min_unset = self.par.min is None or self.par.min == -np.inf
+        max_unset = self.par.max is None or self.par.max == np.inf
+        self.min_checkbox.value = not min_unset
+        self.min_text.visible = not min_unset
+        self.min_text.value = self.par.min
+        self.max_checkbox.value = not max_unset
+        self.max_text.visible = not max_unset
+        self.max_text.value = self.par.max
+        self.vary_checkbox.value = self.par.vary
+
+        # Configure widgets to sync with par attributes.
+        self.value_text.on_trait_change(self._on_value_change, 'value')
+        self.min_text.on_trait_change(self._on_min_value_change, 'value')
+        self.max_text.on_trait_change(self._on_max_value_change, 'value')
+        self.min_checkbox.on_trait_change(self._on_min_checkbox_change,
+                                          'value')
+        self.max_checkbox.on_trait_change(self._on_max_checkbox_change,
+                                          'value')
+        self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')
+
+    def _on_value_change(self, name, value):
+        self.par.value = value
+
+    def _on_min_checkbox_change(self, name, value):
+        self.min_text.visible = value
+        if value:
+            # -np.inf does not play well with a numerical text field,
+            # so set min to -1 if activated (and back to -inf if deactivated).
+            self.min_text.value = -1
+            self.par.min = self.min_text.value
+            self.value_text.min = self.min_text.value
+        else:
+            self.par.min = None
+
+    def _on_max_checkbox_change(self, name, value):
+        self.max_text.visible = value
+        if value:
+            # np.inf does not play well with a numerical text field,
+            # so set max to 1 if activated (and back to inf if deactivated).
+            self.max_text.value = 1
+            self.par.max = self.max_text.value
+            self.value_text.max = self.max_text.value
+        else:
+            self.par.max = None
+
+    def _on_min_value_change(self, name, value):
+        self.par.min = value
+        self.value_text.min = value
+        self.max_text.min = value
+
+    def _on_max_value_change(self, name, value):
+        self.par.max = value
+        self.value_text.max = value
+        self.min_text.max = value
+
+    def _on_vary_change(self, name, value):
+        self.par.vary = value
+        self.value_text.disabled = not value
+
+    def close(self):
+        # one convenience method to close (i.e., hide and disconnect) all
+        # widgets in this group
+        self.value_text.close()
+        self.min_text.close()
+        self.max_text.close()
+        self.vary_checkbox.close()
+        self.min_checkbox.close()
+        self.max_checkbox.close()
+
+    def _repr_html_(self):
+        box = Box()
+        box.children = [self.value_text, self.vary_checkbox,
+                        self.min_text, self.min_checkbox,
+                        self.max_text, self.max_checkbox]
+        display(box)
+        box.add_class('hbox')
+
+    # Make it easy to set the widget attributes directly.
+    @property
+    def value(self):
+        return self.value_text.value
+
+    @value.setter
+    def value(self, value):
+        self.value_text.value = value
+
+    @property
+    def vary(self):
+        return self.vary_checkbox.value
+
+    @vary.setter
+    def vary(self, value):
+        self.vary_checkbox.value = value
+
+    @property
+    def min(self):
+        return self.min_text.value
+
+    @min.setter
+    def min(self, value):
+        self.min_text.value = value
+
+    @property
+    def max(self):
+        return self.max_text.value
+
+    @max.setter
+    def max(self, value):
+        self.max_text.value = value
+
+    @property
+    def name(self):
+       return self.par.name
+
+
+class NotebookFitter(MPLFitter):
+    __doc__ = _COMMON_DOC + """
+    If IPython is available, it uses the IPython notebook's rich display
+    to fit data interactively in a web-based GUI. The Parameters are
+    represented in a web-based form that is kept in sync with `current_params`.
+    All subclasses of Model, including user-defined ones, are shown in a
+    drop-down menu.
+
+    Clicking the "Fit" button updates a plot, as above, and updates the
+    Parameters in the form to reflect the best fit.
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use, may be set or changed later
+    all_models : list
+        optional list of Models to populate drop-down menu, by default
+        all built-in and user-defined subclasses of Model are used
+
+    Additional Parameters
+    ---------------------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    **kwargs : independent variables or extra arguments, passed like `x=x`
+    """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, all_models=None, axes_style={},
+                data_style={}, init_style={}, best_style={}, **kwargs):
+        # Dropdown menu of all subclasses of Model, incl. user-defined.
+        self.models_menu = Dropdown()
+        if all_models is None:
+            all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
+        self.models_menu.values = all_models
+        self.models_menu.on_trait_change(self._on_model_value_change,
+                                             'value')
+        # Button to trigger fitting.
+        self.fit_button = Button(description='Fit')
+        self.fit_button.on_click(self._on_fit_button_click)
+
+        # Button to trigger guessing.
+        self.guess_button = Button(description='Auto-Guess')
+        self.guess_button.on_click(self._on_guess_button_click)
+
+        # Parameter widgets are not built here. They are (re-)built when
+        # the model is (re-)set.
+        super(NotebookFitter, self).__init__(data, model, axes_style,
+                                             data_style, init_style,
+                                             best_style, **kwargs)
+
+    def _repr_html_(self):
+        display(self.models_menu)
+        button_box = Box()
+        button_box.children = [self.fit_button, self.guess_button]
+        display(button_box)
+        button_box.add_class('hbox')
+        for pw in self.param_widgets:
+            display(pw)
+        self.plot()
+
+    def guess(self):
+        guessing_successful = super(NotebookFitter, self).guess()
+        self.guess_button.disabled = not guessing_successful
+
+    def _finalize_model(self, value):
+        first_run = not hasattr(self, 'param_widgets')
+        if not first_run:
+            # Remove all Parameter widgets, and replace them with widgets
+            # for the new model.
+            for pw in self.param_widgets:
+                pw.close()
+        self.models_menu.value = value
+        self.param_widgets = [ParameterWidgetGroup(p)
+                              for _, p in self._current_params.items()]
+        if not first_run:
+            for pw in self.param_widgets:
+                display(pw)
+
+    def _finalize_params(self):
+        for pw in self.param_widgets:
+            pw.value = self._current_params[pw.name].value
+            pw.min = self._current_params[pw.name].min
+            pw.max = self._current_params[pw.name].max
+            pw.vary = self._current_params[pw.name].vary
+
+    def plot(self):
+        clear_output(wait=True)
+        super(NotebookFitter, self).plot()
+
+    def fit(self):
+        super(NotebookFitter, self).fit()
+        self.plot()
diff --git a/lmfit/wrap.py b/lmfit/wrap.py
deleted file mode 100644
index 49a4aa9..0000000
--- a/lmfit/wrap.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-
-from inspect import getargspec
-from .parameter import Parameters
-
-def make_paras_and_func(fcn, x0, used_kwargs=None):
-    """A function which takes a function a makes a parameters-dict for it.
-
-    Takes the function fcn. A starting guess x0 for the
-    non kwargs paramter must be also given. If kwargs
-    are used, used_kwargs is dict were the keys are the
-    used kwarg and the values are the starting values.
-    """
-    import inspect
-    args = inspect.getargspec(fcn)
-    defaults = args[-1]
-    len_def = len(defaults) if defaults is not None else 0
-    # have_defaults = args[-len(defaults):]
-
-    args_without_defaults = len(args[0]) - len_def
-
-    if len(x0) < args_without_defaults:
-        raise ValueError('x0 to short')
-    p = Parameters()
-    for i, val in enumerate(x0):
-        p.add(args[0][i], val)
-
-    if used_kwargs:
-        for arg, val in used_kwargs.items():
-            p.add(arg, val)
-    else:
-        used_kwargs = {}
-
-    def func(para):
-        "wrapped func"
-        kwdict = {}
-        for arg in used_kwargs.keys():
-            kwdict[arg] = para[arg].value
-
-        vals = [para[i].value for i in p]
-        return fcn(*vals[:len(x0)], **kwdict)
-
-    return p, func
-
-
-def wrap_function(fcn, x0=None, non_params=None):
-    """wrap a function, transforming the functions positional arguments
-    into a Parameters dictionay object.   Thea
-    new wrapper function using these Parameters that can be used
-    as an objective function.
-
-    Arguments
-    ---------
-    fcn          function to be wrapped using positional arguments
-    x0           list or array of starting values for positional arguments
-    non_params   list or arguments that should *not* be tranformer to
-                 Parameters, but be turned into keyword arguments.
-    Returns
-    -------
-    params, wrapped_function
-
-    Example
-    -------
-     >>> def lorentz(x, amp, cen, wid):
-     ...     'standard definition of Lorentzian function'
-     ...     return  amp/(1.0 + ((x-cen)/wid)**2)
-     >>>
-     >>> pars, fwrap = wrap_function(lorentz, x0=(5., 0., 1.2),
-     ...                             non_params=['x'])
-     >>> for p in pars.values(): print(p)
-     <Parameter 'amp', 5.0, bounds=[-inf:inf]>
-     <Parameter 'cen', 0.0, bounds=[-inf:inf]>
-     <Parameter 'wid', 1.2, bounds=[-inf:inf]>
-
-     >>> x = np.linspace(-5, 5, 101)
-     >>> fwrap(pars, x=x)
-    """
-    x0 = x0 if x0 is not None else []
-    non_params = non_params if non_params is not None else []
-    required_kwargs = []
-    kwargs = {}
-
-    argspec = getargspec(fcn)
-    defaults = argspec.defaults
-    len_def = len(defaults) if defaults is not None else 0
-    nposargs = len(argspec.args) - len_def
-
-    p = Parameters()
-    # positional arguments
-    i = 0
-    for parname in argspec.args[:nposargs]:
-        if parname in non_params:
-            required_kwargs.append(parname)
-        else:
-            val = 0.0
-            if i < len(x0):
-                val = float(x0[i])
-                i += 1
-            p.add(parname, val)
-    # keyword arguments
-    if len_def > 0:
-        for ikw in range(len_def):
-            parname = argspec.args[nposargs+ikw]
-            defval = argspec.defaults[ikw]
-            if len(x0) > nposargs and len(x0) > nposargs+ikw:
-                defval = x0[nposargs+ikw]
-            if (parname in non_params or
-                not isinstance(defval, (int, float))):
-                required_kwargs.append(parname)
-                kwargs[parname] = defval
-            else:
-                p.add(parname, defval)
-
-    def func(params, **kws):
-        "wrapped function"
-        vals = []
-        kwdict = {}
-        kwdict.update(kwargs)
-
-        for varname in required_kwargs:
-            if varname not in kws and varname not in kwargs:
-                raise ValueError("No value for required argument %s" % varname)
-
-        for varname in argspec.args:
-            if varname in p:
-                vals.append(p[varname].value)
-            elif varname in kws:
-                vals.append(kws.pop(varname))
-            else:
-                raise ValueError("No value for %s" % varname)
-
-        kwdict.update(kws)
-        return fcn(*vals, **kwdict)
-
-    tmpl = "wrapping of %s for Parameters. Original doc:\n%s"
-    func.__doc__ = tmpl % (fcn.__name__, fcn.__doc__)
-    return p, func
diff --git a/requirements.txt b/requirements.txt
index 11580ee..fe73b4f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,2 @@
-scipy>=0.13
 numpy>=1.5
-
+scipy>=0.13
diff --git a/setup.py b/setup.py
index eeee2df..9dad2f1 100644
--- a/setup.py
+++ b/setup.py
@@ -43,6 +43,6 @@ setup(name = 'lmfit',
       # test_suite='nose.collector',
       # test_requires=['Nose'],
       package_dir = {'lmfit': 'lmfit'},
-      packages = ['lmfit', 'lmfit.uncertainties'],
+      packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
       )
 
diff --git a/tests/test_copy_params.py b/tests/test_copy_params.py
new file mode 100644
index 0000000..e17aa18
--- /dev/null
+++ b/tests/test_copy_params.py
@@ -0,0 +1,36 @@
+import numpy as np
+from lmfit import Parameters, minimize, report_fit
+
+def get_data():
+    x = np.arange(0, 1, 0.01)
+    y1 = 1.5*np.exp(0.9*x) + np.random.normal(scale=0.001, size=len(x))
+    y2 = 2.0 + x + 1/2.*x**2 +1/3.*x**3
+    y2 = y2 + np.random.normal(scale=0.001, size=len(x))
+    return x, y1, y2
+
+def residual(params, x, data):
+    a = params['a'].value
+    b = params['b'].value
+
+    model = a*np.exp(b*x)
+    return (data-model)
+
+def test_copy_params():
+    x, y1, y2 = get_data()
+
+    params = Parameters()
+    params.add('a', value = 2.0)
+    params.add('b', value = 2.0)
+
+    # fit to first data set
+    out1 = minimize(residual, params, args=(x, y1))
+
+    # fit to second data set
+    out2 = minimize(residual, params, args=(x, y2))
+
+    adiff = out1.params['a'].value - out2.params['a'].value
+    bdiff = out1.params['b'].value - out2.params['b'].value
+
+    assert(abs(adiff) > 1.e-2)
+    assert(abs(bdiff) > 1.e-2)
+
diff --git a/tests/test_model.py b/tests/test_model.py
index 477cdd3..f58e08e 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,5 +1,6 @@
 import unittest
 import warnings
+import nose
 from numpy.testing import assert_allclose
 import numpy as np
 
@@ -11,31 +12,176 @@ def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
     for param_name, value in desired.items():
          assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose)
 
-class TestUserDefiniedModel(unittest.TestCase):
-    # mainly aimed at checking that the API does what it says it does
-    # and raises the right exceptions or warnings when things are not right
+def _skip_if_no_pandas():
+    try:
+        import pandas
+    except ImportError:
+        raise nose.SkipTest("Skipping tests that require pandas.")
+
+
+class CommonTests(object):
+    # to be subclassed for testing predefined models
 
     def setUp(self):
-        self.x = np.linspace(-10, 10, num=1000)
         np.random.seed(1)
-        self.noise = 0.01*np.random.randn(*self.x.shape)
-        self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
-        self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
-        # return a fresh copy
-        self.model = Model(gaussian)
-        self.data = gaussian(x=self.x, **self.true_values()) + self.noise
+        self.noise = 0.0001*np.random.randn(*self.x.shape)
+        # Some Models need args (e.g., polynomial order), and others don't.
+        try:
+            args = self.args
+        except AttributeError:
+            self.model = self.model_constructor()
+            self.model_drop = self.model_constructor(missing='drop')
+            self.model_raise = self.model_constructor(missing='raise')
+            self.model_explicit_var = self.model_constructor(['x'])
+            func = self.model.func
+        else:
+            self.model = self.model_constructor(*args)
+            self.model_drop = self.model_constructor(*args, missing='drop')
+            self.model_raise = self.model_constructor(*args, missing='raise')
+            self.model_explicit_var = self.model_constructor(
+                *args, independent_vars=['x'])
+            func = self.model.func
+        self.data = func(x=self.x, **self.true_values()) + self.noise
+
+    @property
+    def x(self):
+        return np.linspace(1, 10, num=1000)
+
+    def test_fit(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        assert_results_close(result.values, self.true_values())
 
-    def test_fit_with_keyword_params(self):
-        result = self.model.fit(self.data, x=self.x, **self.guess())
+        # Pass individual Parameter objects as kwargs.
+        kwargs = {name: p for name, p in params.items()}
+        result = self.model.fit(self.data, x=self.x, **kwargs)
         assert_results_close(result.values, self.true_values())
 
-    def test_fit_with_parameters_obj(self):
-        params = self.model.params
+        # Pass guess values (not Parameter objects) as kwargs.
+        kwargs = {name: p.value for name, p in params.items()}
+        result = self.model.fit(self.data, x=self.x, **kwargs)
+        assert_results_close(result.values, self.true_values())
+
+    def test_explicit_independent_vars(self):
+        self.check_skip_independent_vars()
+        model = self.model_explicit_var
+        pars = model.make_params(**self.guess())
+        result = model.fit(self.data, pars, x=self.x)
+        assert_results_close(result.values, self.true_values())
+
+    def test_fit_with_weights(self):
+        model = self.model
+
+        # fit without weights
+        params = model.make_params(**self.guess())
+        out1 = model.fit(self.data, params, x=self.x)
+
+        # fit with weights
+        weights = 1.0/(0.5 + self.x**2)
+        out2 = model.fit(self.data, params, weights=weights, x=self.x)
+
+        max_diff = 0.0
+        for parname, val1 in out1.values.items():
+            val2 = out2.values[parname]
+            if max_diff < abs(val1-val2):
+                max_diff = abs(val1-val2)
+        assert(max_diff > 1.e-8)
+
+    def test_result_attributes(self):
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        # result.init_values
+        assert_results_close(result.values, self.true_values())
+        self.assertEqual(result.init_values, self.guess())
+
+        # result.init_params
+        params = self.model.make_params()
         for param_name, value in self.guess().items():
             params[param_name].value = value
-        result = self.model.fit(self.data, params, x=self.x)
+        self.assertEqual(result.init_params, params)
+
+        # result.best_fit
+        assert_allclose(result.best_fit, self.data, atol=self.noise.max())
+
+        # result.init_fit
+        init_fit = self.model.func(x=self.x, **self.guess())
+        assert_allclose(result.init_fit, init_fit)
+
+        # result.model
+        self.assertTrue(result.model is self.model)
+
+    def test_result_eval(self):
+        # Check eval() output against init_fit and best_fit.
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        assert_allclose(result.eval(x=self.x, **result.values),
+                        result.best_fit)
+        assert_allclose(result.eval(x=self.x, **result.init_values),
+                        result.init_fit)
+
+    def test_result_eval_custom_x(self):
+        self.check_skip_independent_vars()
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        # Check that the independent variable is respected.
+        short_eval = result.eval(x=np.array([0, 1, 2]), **result.values)
+        self.assertEqual(len(short_eval), 3)
+
+    def test_data_alignment(self):
+        _skip_if_no_pandas()
+        from pandas import Series
+
+        # Align data and indep var of different lengths using pandas index.
+        data = Series(self.data.copy()).iloc[10:-10]
+        x = Series(self.x.copy())
+
+        model = self.model
+        params = model.make_params(**self.guess())
+        result = model.fit(data, params, x=x)
+        result = model.fit(data, params, x=x)
+        assert_results_close(result.values, self.true_values())
+
+        # Skip over missing (NaN) values, aligning via pandas index.
+        data.iloc[500:510] = np.nan
+        result = self.model_drop.fit(data, params, x=x)
         assert_results_close(result.values, self.true_values())
 
+        # Raise if any NaN values are present.
+        raises = lambda: self.model_raise.fit(data, params, x=x)
+        self.assertRaises(ValueError, raises)
+
+    def check_skip_independent_vars(self):
+        # to be overridden for models that do not accept indep vars
+        pass
+
+
+class TestUserDefiniedModel(CommonTests, unittest.TestCase):
+    # mainly aimed at checking that the API does what it says it does
+    # and raises the right exceptions or warnings when things are not right
+
+    def setUp(self):
+        self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
+        self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
+        # return a fresh copy
+        self.model_constructor = (
+            lambda *args, **kwargs: Model(gaussian, *args, **kwargs))
+        super(TestUserDefiniedModel, self).setUp()
+
+    @property
+    def x(self):
+        return np.linspace(-10, 10, num=1000)
+
+    def test_lists_become_arrays(self):
+        # smoke test
+        self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess())
+        self.model.fit([1, 2, None, 3], x=[1, 2, 3, 4], **self.guess())
+
     def test_missing_param_raises_error(self):
 
         # using keyword argument parameters
@@ -45,81 +191,86 @@ class TestUserDefiniedModel(unittest.TestCase):
         #self.assertRaises(ValueError, f)
 
         # using Parameters
-        params = self.model.params
+        params = self.model.make_params()
         for param_name, value in guess_missing_sigma.items():
             params[param_name].value = value
         f = lambda: self.model.fit(self.data, params, x=self.x)
 
     def test_extra_param_issues_warning(self):
         # The function accepts extra params, Model will warn but not raise.
-        guess = self.guess()
-        guess['extra'] = 5
-
         def flexible_func(x, amplitude, center, sigma, **kwargs):
             return gaussian(x, amplitude, center, sigma)
 
         flexible_model = Model(flexible_func)
+        pars = flexible_model.make_params(**self.guess())
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
-            flexible_model.fit(self.data, x=self.x, **guess)
+            flexible_model.fit(self.data, pars, x=self.x, extra=5)
         self.assertTrue(len(w) == 1)
         self.assertTrue(issubclass(w[-1].category, UserWarning))
 
     def test_missing_independent_variable_raises_error(self):
-        f = lambda: self.model.fit(self.data, **self.guess())
+        pars = self.model.make_params(**self.guess())
+        f = lambda: self.model.fit(self.data, pars)
         self.assertRaises(KeyError, f)
 
     def test_bounding(self):
-        guess = self.guess()
-        guess['center'] = Parameter(value=2, min=1.3)
         true_values = self.true_values()
         true_values['center'] = 1.3  # as close as it's allowed to get
-        result = self.model.fit(self.data, x=self.x, **guess)
+        pars = self.model.make_params(**self.guess())
+        pars['center'].set(value=2, min=1.3)
+        result = self.model.fit(self.data, pars, x=self.x)
         assert_results_close(result.values, true_values, rtol=0.05)
 
     def test_vary_false(self):
-        guess = self.guess()
-        guess['center'] = Parameter(value=1.3, vary=False)
         true_values = self.true_values()
         true_values['center'] = 1.3
-        result = self.model.fit(self.data, x=self.x, **guess)
+        pars = self.model.make_params(**self.guess())
+        pars['center'].set(value=1.3, vary=False)
+        result = self.model.fit(self.data, pars, x=self.x)
         assert_results_close(result.values, true_values, rtol=0.05)
 
-    def test_result_attributes(self):
-        # result.init_values
-        result = self.model.fit(self.data, x=self.x, **self.guess())
-        assert_results_close(result.values, self.true_values())
-        self.assertTrue(result.init_values == self.guess())
-
-        # result.init_params
-        params = self.model.params
-        for param_name, value in self.guess().items():
-            params[param_name].value = value
-        self.assertTrue(result.init_params == params)
-
-        # result.best_fit
-        assert_allclose(result.best_fit, self.data, atol=self.noise.max())
-
-        # result.init_fit
-        init_fit = self.model.func(x=self.x, **self.guess())
-        assert_allclose(result.init_fit, init_fit)
-
-        # result.model
-        self.assertTrue(result.model is self.model)
-
     # testing model addition...
 
     def test_user_defined_gaussian_plus_constant(self):
         data = self.data + 5.0
         model = self.model + models.ConstantModel()
         guess = self.guess()
-        guess['c'] = 10.1
+        pars = model.make_params(c= 10.1, **guess)
         true_values = self.true_values()
         true_values['c'] = 5.0
 
-        result = model.fit(data, x=self.x, **guess)
+        result = model.fit(data, pars, x=self.x)
         assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
+    def test_model_with_prefix(self):
+        # model with prefix of 'a' and 'b'
+        mod = models.GaussianModel(prefix='a')
+        vals = {'center': 2.45, 'sigma':0.8, 'amplitude':3.15}
+        data = gaussian(x=self.x, **vals) + self.noise/3.0
+        pars = mod.guess(data, x=self.x)
+        self.assertTrue('aamplitude' in pars)
+        self.assertTrue('asigma' in pars)
+        out = mod.fit(data, pars, x=self.x)
+        self.assertTrue(out.params['aamplitude'].value > 2.0)
+        self.assertTrue(out.params['acenter'].value > 2.0)
+        self.assertTrue(out.params['acenter'].value < 3.0)
+
+
+        mod = models.GaussianModel(prefix='b')
+        data = gaussian(x=self.x, **vals) + self.noise/3.0
+        pars = mod.guess(data, x=self.x)
+        self.assertTrue('bamplitude' in pars)
+        self.assertTrue('bsigma' in pars)
+
+    def test_change_prefix(self):
+        mod = models.GaussianModel(prefix='b')
+        mod.prefix = 'c'
+        params = mod.make_params()
+        names = params.keys()
+        all_begin_with_c = all([n.startswith('c') for n in names])
+        self.assertTrue(all_begin_with_c)
+
     def test_sum_of_two_gaussians(self):
         # two user-defined gaussians
         model1 = self.model
@@ -130,15 +281,16 @@ class TestUserDefiniedModel(unittest.TestCase):
 
         data  = gaussian(x=self.x, **values1) + f2(x=self.x, **values2) + self.noise/3.0
         model = self.model + model2
-        guess = {'sigma': Parameter(value=2, min=0),
-                 'center': Parameter(value=1, min=0.2, max=1.8),
-                 'amplitude': Parameter(value=3, min=0),
-                 'sig':  Parameter(value=1, min=0),
-                 'cen': Parameter(value=2.4, min=2, max=3.5),
-                 'amp': Parameter(value=1, min=0)}
+        pars = model.make_params()
+        pars['sigma'].set(value=2, min=0)
+        pars['center'].set(value=1, min=0.2, max=1.8)
+        pars['amplitude'].set(value=3, min=0)
+        pars['sig'].set(value=1, min=0)
+        pars['cen'].set(value=2.4, min=2, max=3.5)
+        pars['amp'].set(value=1, min=0)
 
         true_values = dict(list(values1.items()) + list(values2.items()))
-        result = model.fit(data, x=self.x, **guess)
+        result = model.fit(data, pars, x=self.x)
         assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
         # user-defined models with common parameter names
@@ -156,10 +308,15 @@ class TestUserDefiniedModel(unittest.TestCase):
                        'g2_center': values2['cen'],
                        'g2_amplitude': values2['amp'],
                        'g2_sigma': values2['sig']}
-        guess = {'g1_sigma': 2, 'g1_center': 1, 'g1_amplitude': 3,
-                 'g2_sigma': 1, 'g2_center': 2.4, 'g2_amplitude': 1}
-
-        result = model.fit(data, x=self.x, **guess)
+        pars = model.make_params()
+        pars['g1_sigma'].set(2)
+        pars['g1_center'].set(1)
+        pars['g1_amplitude'].set(3)
+        pars['g2_sigma'].set(1)
+        pars['g2_center'].set(2.4)
+        pars['g2_amplitude'].set(1)
+
+        result = model.fit(data, pars, x=self.x)
         assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
         # without suffix, the names collide and Model should raise
@@ -168,36 +325,32 @@ class TestUserDefiniedModel(unittest.TestCase):
         f = lambda: model1 + model2
         self.assertRaises(NameError, f)
 
+    def test_sum_composite_models(self):
+        # test components of a composite model created by adding composite models
+        model1 = models.GaussianModel(prefix='g1_')
+        model2 = models.GaussianModel(prefix='g2_')
+        model3 = models.GaussianModel(prefix='g3_')
+        model4 = models.GaussianModel(prefix='g4_')
 
-class CommonTests(object):
-    # to be subclassed for testing predefined models
+        model_total1 = (model1 + model2) + model3
+        for mod in [model1, model2, model3]:
+            self.assertTrue(mod in model_total1.components)
 
-    def setUp(self):
-        self.x = np.linspace(1, 10, num=1000)
-        noise = 0.0001*np.random.randn(*self.x.shape)
-        # Some Models need args (e.g., polynomial order), and others don't.
-        try:
-            args = self.args
-        except AttributeError:
-            self.model_instance = self.model()
-            func = self.model_instance.func
+        model_total2 = model1 + (model2 + model3)
+        for mod in [model1, model2, model3]:
+            self.assertTrue(mod in model_total2.components)
 
-        else:
-            self.model_instance = self.model(*args, independent_vars=['x'])
-            func = self.model_instance.func
-        self.data = func(x=self.x, **self.true_values()) + noise
+        model_total3 = (model1 + model2) + (model3 + model4)
+        for mod in [model1, model2, model3, model4]:
+            self.assertTrue(mod in model_total3.components)
 
-    def test_fit(self):
-        model = self.model_instance
-        result = model.fit(self.data, x=self.x, **self.guess())
-        assert_results_close(result.values, self.true_values())
 
 class TestLinear(CommonTests, unittest.TestCase):
 
     def setUp(self):
         self.true_values = lambda: dict(slope=5, intercept=2)
         self.guess = lambda: dict(slope=10, intercept=6)
-        self.model = models.LinearModel
+        self.model_constructor = models.LinearModel
         super(TestLinear, self).setUp()
 
 
@@ -206,7 +359,7 @@ class TestParabolic(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(a=5, b=2, c=8)
         self.guess = lambda: dict(a=1, b=6, c=3)
-        self.model = models.ParabolicModel
+        self.model_constructor = models.ParabolicModel
         super(TestParabolic, self).setUp()
 
 
@@ -216,7 +369,7 @@ class TestPolynomialOrder2(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c2=5, c1=2, c0=8)
         self.guess = lambda: dict(c1=1, c2=6, c0=3)
-        self.model = models.PolynomialModel
+        self.model_constructor = models.PolynomialModel
         self.args = (2,)
         super(TestPolynomialOrder2, self).setUp()
 
@@ -227,7 +380,7 @@ class TestPolynomialOrder3(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8)
         self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
-        self.model = models.PolynomialModel
+        self.model_constructor = models.PolynomialModel
         self.args = (3,)
         super(TestPolynomialOrder3, self).setUp()
 
@@ -237,16 +390,18 @@ class TestConstant(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c=5)
         self.guess = lambda: dict(c=2)
-        self.model = models.ConstantModel
+        self.model_constructor = models.ConstantModel
         super(TestConstant, self).setUp()
 
+    def check_skip_independent_vars(self):
+        raise nose.SkipTest("ConstantModel has no independent_vars.")
 
 class TestPowerlaw(CommonTests, unittest.TestCase):
 
     def setUp(self):
         self.true_values = lambda: dict(amplitude=5, exponent=3)
         self.guess = lambda: dict(amplitude=2, exponent=8)
-        self.model = models.PowerLawModel
+        self.model_constructor = models.PowerLawModel
         super(TestPowerlaw, self).setUp()
 
 
@@ -255,5 +410,5 @@ class TestExponential(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(amplitude=5, decay=3)
         self.guess = lambda: dict(amplitude=2, decay=8)
-        self.model = models.ExponentialModel
+        self.model_constructor = models.ExponentialModel
         super(TestExponential, self).setUp()
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
index 9408c77..31a67c7 100644
--- a/tests/test_stepmodel.py
+++ b/tests/test_stepmodel.py
@@ -17,41 +17,43 @@ def get_data():
 def test_stepmodel_linear():
     x, y = get_data()
     stepmod = StepModel(form='linear')
-    stepmod.guess_starting_values(y, x)
+    const = ConstantModel()
+    pars = stepmod.guess(y, x)
+    pars = pars + const.make_params(c=3*y.min())
+    mod = stepmod + const
 
-    mod = stepmod + ConstantModel()
-    mod.set_paramval('c', 3*y.min())
-    out = mod.fit(y, x=x)
+    out = mod.fit(y, pars, x=x)
 
     assert(out.nfev > 5)
     assert(out.nvarys == 4)
     assert(out.chisqr > 1)
-    assert(mod.params['c'].value > 3)
-    assert(mod.params['center'].value > 1)
-    assert(mod.params['center'].value < 4)
-    assert(mod.params['sigma'].value > 0.5)
-    assert(mod.params['sigma'].value < 3.5)
-    assert(mod.params['amplitude'].value > 50)
+    assert(out.params['c'].value > 3)
+    assert(out.params['center'].value > 1)
+    assert(out.params['center'].value < 4)
+    assert(out.params['sigma'].value > 0.5)
+    assert(out.params['sigma'].value < 3.5)
+    assert(out.params['amplitude'].value > 50)
 
 
 def test_stepmodel_erf():
     x, y = get_data()
-    stepmod = StepModel(form='erf')
-    stepmod.guess_starting_values(y, x)
+    stepmod = StepModel(form='erf')
+    const = ConstantModel()
+    pars = stepmod.guess(y, x)
+    pars = pars + const.make_params(c=3*y.min())
+    mod = stepmod + const
 
-    mod = stepmod + ConstantModel()
-    mod.set_paramval('c', 3) # *y.min())
+    out = mod.fit(y, pars, x=x)
 
-    out = mod.fit(y, x=x)
     assert(out.nfev > 5)
     assert(out.nvarys == 4)
     assert(out.chisqr > 1)
-    assert(mod.params['c'].value > 3)
-    assert(mod.params['center'].value > 1)
-    assert(mod.params['center'].value < 4)
-    assert(mod.params['amplitude'].value > 50)
-    assert(mod.params['sigma'].value > 0.2)
-    assert(mod.params['sigma'].value < 1.5)
+    assert(out.params['c'].value > 3)
+    assert(out.params['center'].value > 1)
+    assert(out.params['center'].value < 4)
+    assert(out.params['amplitude'].value > 50)
+    assert(out.params['sigma'].value > 0.2)
+    assert(out.params['sigma'].value < 1.5)
 
 if __name__ == '__main__':
     # test_stepmodel_linear()
diff --git a/tests/test_wrap_function.py b/tests/test_wrap_function.py
deleted file mode 100644
index f6bec38..0000000
--- a/tests/test_wrap_function.py
+++ /dev/null
@@ -1,31 +0,0 @@
-
-from lmfit import wrap_function
-
-def test_wrap_function():
-    get_names = lambda p: [p_key for p_key in p ]
-
-    def func(A, b, c, d=5., e=10):
-        return float(A + b + c + d)
-
-    x0 = [1, 2, 3]
-    para, f = wrap_function(func, x0)
-    assert(get_names(para) == ['A', 'b', 'c', 'd', 'e'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
-
-    x0 = [1, 2, 3, 4]
-    para, f = wrap_function(func, x0)
-    assert(get_names(para) == ['A', 'b', 'c', 'd', 'e'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
-
-    x0 = [9.2, 2.0, 7.]
-    para, f = wrap_function(func, x0)
-    assert(get_names(para) == ['A', 'b', 'c', 'd', 'e'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
-
-test_wrap_function()

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git



More information about the debian-science-commits mailing list