[lmfit-py] 01/03: Imported Upstream version 0.9.5+dfsg

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Wed Aug 17 10:02:01 UTC 2016


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to branch master
in repository lmfit-py.

commit e1a98b942a00c8f8cc663535ca1e1c050a922ce3
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Wed Aug 17 08:24:30 2016 +0200

    Imported Upstream version 0.9.5+dfsg
---
 INSTALL                                         |  11 +-
 LICENSE                                         |  34 ++
 PKG-INFO                                        |   2 +-
 doc/__pycache__/extensions.cpython-35.pyc       | Bin 358 -> 0 bytes
 doc/_images/model_fit3a.png                     | Bin 30423 -> 32927 bytes
 doc/_images/model_fit3b.png                     | Bin 29907 -> 32553 bytes
 doc/_images/models_stepfit.png                  | Bin 31966 -> 33898 bytes
 doc/builtin_models.rst                          | 313 +++++++------
 doc/confidence.rst                              |  28 +-
 doc/extensions.pyc                              | Bin 398 -> 398 bytes
 doc/faq.rst                                     |   4 +-
 doc/fitting.rst                                 | 391 ++++++----------
 doc/index.rst                                   |  11 +-
 doc/installation.rst                            |  37 +-
 doc/intro.rst                                   |   8 +-
 doc/model.rst                                   | 124 ++---
 doc/parameters.rst                              |  62 +--
 doc/testlinks.py                                |   9 +
 doc/testlinks.py.~1~                            |   0
 doc/whatsnew.rst                                |  39 ++
 lmfit/__init__.py                               |  20 +-
 lmfit/_version.py                               |   4 +-
 lmfit/asteval.py                                |  18 +-
 lmfit/astutils.py                               |   4 +-
 lmfit/lineshapes.py                             |   4 +-
 lmfit/minimizer.py                              | 579 +++++++++++++++++-------
 lmfit/model.py                                  |  84 +++-
 lmfit/models.py                                 |   2 +-
 lmfit/parameter.py                              |  33 +-
 lmfit/printfuncs.py                             |   8 +-
 lmfit/ui/basefitter.py                          |   2 +-
 requirements.txt                                |   2 +-
 tests/_test_ci.py                               |   9 +-
 tests/test_1variable.py                         |   6 +-
 tests/test_algebraic_constraint.py              |  24 +-
 tests/test_algebraic_constraint2.py             |  16 +-
 tests/test_basicfit.py                          |   8 +-
 tests/test_bounded_jacobian.py                  |   8 +-
 tests/test_bounds.py                            |   8 +-
 tests/test_confidence.py                        |   9 +-
 tests/test_copy_params.py                       |   6 +-
 tests/{test_bounds.py => test_least_squares.py} |  19 +-
 tests/test_model.py                             |  21 +-
 tests/test_multidatasets.py                     |   6 +-
 tests/test_nose.py                              | 103 +++--
 tests/test_params_set.py                        |   2 +-
 46 files changed, 1178 insertions(+), 900 deletions(-)

diff --git a/INSTALL b/INSTALL
index 712b012..7179fb6 100644
--- a/INSTALL
+++ b/INSTALL
@@ -6,7 +6,14 @@ To install the lmfit python module, use::
    python setup.py build
    python setup.py install
 
-Python 2.6 or higher is required, as are numpy and scipy.
+For lmfit 0.9.4, the following versions are required:
+  Python: 2.6, 2.7, 3.3, 3.4, or 3.5
+  Numpy:  1.5 or higher
+  Scipy:  0.13 or higher
+
+Note that Python 2.6 and scipy 0.13 are deprecated, and
+support and testing with them will be dropped in 0.9.5.
+
 
 Matt Newville <newville at cars.uchicago.edu>
-Last Update:  2013-Dec-15
+Last Update:  2016-July-1
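
A quick way to confirm that an environment meets these requirements is a
short version check -- a minimal sketch, assuming only that the packages
are importable::

    import sys
    import numpy
    import scipy
    import lmfit

    # report the versions that the INSTALL notes above care about
    print("Python:", ".".join(str(v) for v in sys.version_info[:3]))
    print("Numpy: ", numpy.__version__)
    print("Scipy: ", scipy.__version__)
    print("lmfit: ", lmfit.__version__)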
diff --git a/LICENSE b/LICENSE
index 174874e..09a46ff 100644
--- a/LICENSE
+++ b/LICENSE
@@ -25,3 +25,37 @@ The LMFIT-py code is distributed under the following license:
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THIS SOFTWARE.
+
+-----------------------------------------
+Some code sections have been taken from the scipy library, whose license is below.
+
+Copyright (c) 2001, 2002 Enthought, Inc.
+All rights reserved.
+
+Copyright (c) 2003-2016 SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of Enthought nor the names of the SciPy Developers
+     may be used to endorse or promote products derived from this software
+     without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/PKG-INFO b/PKG-INFO
index 3842e9b..ad03cb0 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: lmfit
-Version: 0.9.3
+Version: 0.9.5
 Summary: Least-Squares Minimization with Bounds and Constraints
 Home-page: http://lmfit.github.io/lmfit-py/
 Author: LMFit Development Team
diff --git a/doc/__pycache__/extensions.cpython-35.pyc b/doc/__pycache__/extensions.cpython-35.pyc
deleted file mode 100644
index a9f415c..0000000
Binary files a/doc/__pycache__/extensions.cpython-35.pyc and /dev/null differ
diff --git a/doc/_images/model_fit3a.png b/doc/_images/model_fit3a.png
index 26242e4..18ac650 100644
Binary files a/doc/_images/model_fit3a.png and b/doc/_images/model_fit3a.png differ
diff --git a/doc/_images/model_fit3b.png b/doc/_images/model_fit3b.png
index c2e273d..48ba696 100644
Binary files a/doc/_images/model_fit3b.png and b/doc/_images/model_fit3b.png differ
diff --git a/doc/_images/models_stepfit.png b/doc/_images/models_stepfit.png
index c1b5cd8..005ba88 100644
Binary files a/doc/_images/models_stepfit.png and b/doc/_images/models_stepfit.png differ
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index d1e68b6..7c1e06b 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -4,7 +4,7 @@
 Built-in Fitting Models in the :mod:`models` module
 =====================================================
 
-.. module:: models
+.. module:: lmfit.models
 
 Lmfit provides several builtin fitting models in the :mod:`models` module.
 These pre-defined models each subclass from the :class:`model.Model` class of the
@@ -289,7 +289,7 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
     f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
   e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
       {\operatorname{erf}}\bigl[
-         \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
+	 \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
      \bigr] \Bigr\}
 
 
@@ -560,16 +560,16 @@ could define this in a script::
 
     >>> script = """
     def mycurve(x, amp, cen, sig):
-        loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
-        gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
-        return log(loren)*gradient(gauss)/gradient(x)
+	loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
+	gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
+	return log(loren)*gradient(gauss)/gradient(x)
     """
 
 and then use this with :class:`ExpressionModel` as::
 
     >>> mod = ExpressionModel('mycurve(x, height, mid, wid)',
-                              init_script=script,
-                              independent_vars=['x'])
+			      init_script=script,
+			      independent_vars=['x'])
 
 As above, this will interpret the parameter names to be `height`, `mid`,
 and `wid`, and build a model that can be used to fit data.
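
A self-contained sketch of the same mechanism, with a made-up expression,
made-up parameter names, and synthetic data::

    import numpy as np
    from lmfit.models import ExpressionModel

    # the model is defined entirely by the expression string;
    # 'x' is recognized as the independent variable
    mod = ExpressionModel('amp * exp(-x**2 / (2*wid**2)) + off')

    x = np.linspace(-5, 5, 101)
    y = 3.0*np.exp(-x**2/2.0) + 0.5 + np.random.normal(scale=0.05, size=x.size)

    # initial values must be supplied for every named parameter
    result = mod.fit(y, x=x, amp=2.0, wid=2.0, off=0.0)
    print(result.fit_report())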
@@ -603,23 +603,23 @@ built-in default values.  We'll simply use::
 which prints out the results::
 
     [[Model]]
-        gaussian
+	Model(gaussian)
     [[Fit Statistics]]
-        # function evals   = 23
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 29.994
-        reduced chi-square = 0.075
-        Akaike info crit   = -1030.763
-        Bayesian info crit = -1018.781
+	# function evals   = 23
+	# data points      = 401
+	# variables        = 3
+	chi-square         = 29.994
+	reduced chi-square = 0.075
+	Akaike info crit   = -1033.774
+	Bayesian info crit = -1021.792
     [[Variables]]
-        sigma:       1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
-        fwhm:        2.90156963 +/- 0.017366 (0.60%)  == '2.3548200*sigma'
-        height:      9.81457973 +/- 0.050872 (0.52%)  == '0.3989423*amplitude/sigma'
-        center:      9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
-        amplitude:   30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+	sigma:       1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
+	center:      9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
+	amplitude:   30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+	fwhm:        2.90156963 +/- 0.017366 (0.60%)  == '2.3548200*sigma'
+	height:      9.81457973 +/- 0.050872 (0.52%)  == '0.3989423*amplitude/max(1.e-15, sigma)'
     [[Correlations]] (unreported correlations are <  0.250)
-        C(sigma, amplitude)          =  0.577
+	C(sigma, amplitude)          =  0.577
 
 We see a few interesting differences from the results of the previous
 chapter. First, the parameter names are longer. Second, there are ``fwhm``
@@ -652,24 +652,23 @@ with the rest of the script as above.  Perhaps predictably, the first thing
 we try gives results that are worse::
 
     [[Model]]
-        Model(lorentzian)
+	Model(lorentzian)
     [[Fit Statistics]]
-        # function evals   = 27
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 53.754
-        reduced chi-square = 0.135
-        Akaike info crit   = -796.819
-        Bayesian info crit = -784.837
+	# function evals   = 27
+	# data points      = 401
+	# variables        = 3
+	chi-square         = 53.754
+	reduced chi-square = 0.135
+	Akaike info crit   = -799.830
+	Bayesian info crit = -787.848
     [[Variables]]
-        sigma:       1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
-        fwhm:        2.30969034 +/- 0.026312 (1.14%)  == '2.0000000*sigma'
-        height:      10.7420881 +/- 0.086336 (0.80%)  ==
-        '0.3183099*amplitude/sigm    a'
-        center:      9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
-        amplitude:   38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+	sigma:       1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
+	center:      9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
+	amplitude:   38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+	fwhm:        2.30969034 +/- 0.026312 (1.14%)  == '2.0000000*sigma'
+	height:      10.7420881 +/- 0.086336 (0.80%)  == '0.3183099*amplitude/max(1.e-15, sigma)'
     [[Correlations]] (unreported correlations are <  0.250)
-        C(sigma, amplitude)          =  0.709
+	C(sigma, amplitude)          =  0.709
 
 
 with the plot shown on the right in the figure above.  The tails are now
@@ -682,24 +681,24 @@ does a better job.  Using :class:`VoigtModel`, this is as simple as using::
 with all the rest of the script as above.  This gives::
 
     [[Model]]
-        Model(voigt)
+	Model(voigt)
     [[Fit Statistics]]
-        # function evals   = 19
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 14.545
-        reduced chi-square = 0.037
-        Akaike info crit   = -1320.995
-        Bayesian info crit = -1309.013
+	# function evals   = 19
+	# data points      = 401
+	# variables        = 3
+	chi-square         = 14.545
+	reduced chi-square = 0.037
+	Akaike info crit   = -1324.006
+	Bayesian info crit = -1312.024
     [[Variables]]
-        sigma:       0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
-        gamma:       0.73015574 +/- 0.003684 (0.50%)  == 'sigma'
-        fwhm:        2.62951718 +/- 0.013269 (0.50%)  == '3.6013100*sigma'
-        height:      19.5360268 +/- 0.075691 (0.39%)  ==  '0.3989423*amplitude/sigm    a'
-        center:      9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
-        amplitude:   35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+	amplitude:   35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+	sigma:       0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
+	center:      9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
+	gamma:       0.73015574 +/- 0.003684 (0.50%)  == 'sigma'
+	fwhm:        2.62951718 +/- 0.013269 (0.50%)  == '3.6013100*sigma'
+	height:      19.5360268 +/- 0.075691 (0.39%)  == '0.3989423*amplitude/max(1.e-15, sigma)'
     [[Correlations]] (unreported correlations are <  0.250)
-        C(sigma, amplitude)          =  0.651
+	C(sigma, amplitude)          =  0.651
 
 
 which has a much better value for :math:`\chi^2` and an obviously better
@@ -732,27 +731,26 @@ give it a starting value using something like::
 which gives::
 
     [[Model]]
-        Model(voigt)
+	Model(voigt)
     [[Fit Statistics]]
-        # function evals   = 23
-        # data points      = 401
-        # variables        = 4
-        chi-square         = 10.930
-        reduced chi-square = 0.028
-        Akaike info crit   = -1432.556
-        Bayesian info crit = -1416.580
+	# function evals   = 23
+	# data points      = 401
+	# variables        = 4
+	chi-square         = 10.930
+	reduced chi-square = 0.028
+	Akaike info crit   = -1436.576
+	Bayesian info crit = -1420.600
     [[Variables]]
-        sigma:       0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
-        gamma:       0.52540156 +/- 0.018579 (3.54%) (init= 0.7)
-        fwhm:        3.22385492 +/- 0.050974 (1.58%)  == '3.6013100*sigma'
-        height:      15.2374711 +/- 0.299235 (1.96%)  ==
-    '0.3989423*amplitude/sigm    a'
-        center:      9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
-        amplitude:   34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+	amplitude:   34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+	sigma:       0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
+	center:      9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
+	gamma:       0.52540156 +/- 0.018579 (3.54%) (init= 0.7)
+	fwhm:        3.22385492 +/- 0.050974 (1.58%)  == '3.6013100*sigma'
+	height:      15.2374711 +/- 0.299235 (1.96%)  == '0.3989423*amplitude/max(1.e-15, sigma)'
     [[Correlations]] (unreported correlations are <  0.250)
-        C(sigma, gamma)              = -0.928
-        C(gamma, amplitude)          =  0.821
-        C(sigma, amplitude)          = -0.651
+	C(sigma, gamma)              = -0.928
+	C(gamma, amplitude)          =  0.821
+	C(sigma, amplitude)          = -0.651
 
 and the fit shown on the right above.
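
The change described above -- untying ``gamma`` from ``sigma`` so that it
can vary -- looks roughly like the following sketch, assuming ``x`` and
``y`` hold the data from this example::

    from lmfit.models import VoigtModel

    mod = VoigtModel()
    params = mod.guess(y, x=x)
    # drop the default constraint gamma == sigma and let gamma float
    params['gamma'].set(value=0.7, vary=True, expr='')
    result = mod.fit(y, params, x=x)
    print(result.fit_report(min_correl=0.25))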
 
@@ -797,32 +795,31 @@ After making a composite model, we run :meth:`fit` and report the
 results, which gives::
 
     [[Model]]
-    [[Model]]
-        (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_'))
+	(Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_'))
     [[Fit Statistics]]
-        # function evals   = 51
-        # data points      = 201
-        # variables        = 5
-        chi-square         = 648.584
-        reduced chi-square = 3.309
-        Akaike info crit   = 250.532
-        Bayesian info crit = 267.048
+	# function evals   = 51
+	# data points      = 201
+	# variables        = 5
+	chi-square         = 584.829
+	reduced chi-square = 2.984
+	Akaike info crit   = 224.671
+	Bayesian info crit = 241.187
     [[Variables]]
-        line_slope:       2.06986083 +/- 0.097005 (4.69%) (init= 0)
-        line_intercept:   11.7526825 +/- 0.288725 (2.46%) (init= 10.7017)
-        step_center:      3.12329688 +/- 0.005441 (0.17%) (init= 2.5)
-        step_sigma:       0.67050317 +/- 0.011480 (1.71%) (init= 1.428571)
-        step_amplitude:   111.673928 +/- 0.681024 (0.61%) (init= 134.6809)
+	line_slope:       2.03039786 +/- 0.092221 (4.54%) (init= 0)
+	line_intercept:   11.7234542 +/- 0.274094 (2.34%) (init= 10.7816)
+	step_amplitude:   112.071629 +/- 0.647316 (0.58%) (init= 134.0885)
+	step_sigma:       0.67132341 +/- 0.010873 (1.62%) (init= 1.428571)
+	step_center:      3.12697699 +/- 0.005151 (0.16%) (init= 2.5)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(line_slope, step_amplitude)  = -0.878
-        C(step_sigma, step_amplitude)  =  0.563
-        C(line_slope, step_sigma)    = -0.455
-        C(line_intercept, step_center)  =  0.426
-        C(line_slope, line_intercept)  = -0.307
-        C(line_slope, step_center)   = -0.234
-        C(line_intercept, step_sigma)  = -0.139
-        C(line_intercept, step_amplitude)  = -0.122
-        C(step_center, step_amplitude)  =  0.108
+	C(line_slope, step_amplitude)  = -0.878
+	C(step_amplitude, step_sigma)  =  0.563
+	C(line_slope, step_sigma)    = -0.455
+	C(line_intercept, step_center)  =  0.427
+	C(line_slope, line_intercept)  = -0.308
+	C(line_slope, step_center)   = -0.234
+	C(line_intercept, step_sigma)  = -0.139
+	C(line_intercept, step_amplitude)  = -0.121
+	C(step_amplitude, step_center)  =  0.109
 
 with a plot of
 
@@ -860,42 +857,39 @@ parameter values.
 The fit results printed out are::
 
     [[Model]]
-        ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+	((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
     [[Fit Statistics]]
-        # function evals   = 66
-        # data points      = 250
-        # variables        = 8
-        chi-square         = 1247.528
-        reduced chi-square = 5.155
-        Akaike info crit   = 425.995
-        Bayesian info crit = 454.167
+	# function evals   = 66
+	# data points      = 250
+	# variables        = 8
+	chi-square         = 1247.528
+	reduced chi-square = 5.155
+	Akaike info crit   = 417.865
+	Bayesian info crit = 446.036
     [[Variables]]
-        exp_amplitude:   99.0183282 +/- 0.537487 (0.54%) (init= 162.2102)
-        exp_decay:       90.9508861 +/- 1.103105 (1.21%) (init= 93.24905)
-        g1_amplitude:    4257.77318 +/- 42.38336 (1.00%) (init= 2000)
-        g1_sigma:        16.6725753 +/- 0.160481 (0.96%) (init= 15)
-        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 105)
-        g1_fwhm:         39.2609137 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
-        g1_height:       101.880231 +/- 0.592170 (0.58%)  ==
-        '0.3989423*g1_amplitude/g1_    sigma'
-        g2_amplitude:    2493.41770 +/- 36.16947 (1.45%) (init= 2000)
-        g2_sigma:        13.8069484 +/- 0.186794 (1.35%) (init= 15)
-        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 155)
-        g2_fwhm:         32.5128783 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
-        g2_height:       72.0455934 +/- 0.617220 (0.86%)  ==
-        '0.3989423*g2_amplitude/g2_    sigma'
+	exp_amplitude:   99.0183282 +/- 0.537487 (0.54%) (init= 162.2102)
+	exp_decay:       90.9508859 +/- 1.103105 (1.21%) (init= 93.24905)
+	g1_sigma:        16.6725753 +/- 0.160481 (0.96%) (init= 15)
+	g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 105)
+	g1_amplitude:    4257.77319 +/- 42.38336 (1.00%) (init= 2000)
+	g1_fwhm:         39.2609139 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
+	g1_height:       101.880231 +/- 0.592170 (0.58%)  == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
+	g2_sigma:        13.8069484 +/- 0.186794 (1.35%) (init= 15)
+	g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 155)
+	g2_amplitude:    2493.41770 +/- 36.16947 (1.45%) (init= 2000)
+	g2_fwhm:         32.5128782 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
+	g2_height:       72.0455934 +/- 0.617220 (0.86%)  == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
     [[Correlations]] (unreported correlations are <  0.500)
-        C(g1_amplitude, g1_sigma)    =  0.824
-        C(g2_amplitude, g2_sigma)    =  0.815
-        C(exp_amplitude, exp_decay)  = -0.695
-        C(g1_sigma, g2_center)       =  0.684
-        C(g1_center, g2_amplitude)   = -0.669
-        C(g1_center, g2_sigma)       = -0.652
-        C(g1_amplitude, g2_center)   =  0.648
-        C(g1_center, g2_center)      =  0.621
-        C(g1_sigma, g1_center)       =  0.507
-        C(exp_decay, g1_amplitude)   = -0.507
-
+	C(g1_sigma, g1_amplitude)    =  0.824
+	C(g2_sigma, g2_amplitude)    =  0.815
+	C(exp_amplitude, exp_decay)  = -0.695
+	C(g1_sigma, g2_center)       =  0.684
+	C(g1_center, g2_amplitude)   = -0.669
+	C(g1_center, g2_sigma)       = -0.652
+	C(g1_amplitude, g2_center)   =  0.648
+	C(g1_center, g2_center)      =  0.621
+	C(g1_sigma, g1_center)       =  0.507
+	C(exp_decay, g1_amplitude)   = -0.507
 
 We get a very good fit to this problem (described at the NIST site as of
 average difficulty, but the tests there are generally deliberately challenging) by
@@ -921,9 +915,9 @@ this, and by defining an :func:`index_of` function to limit the data range.
 That is, with::
 
     def index_of(arrval, value):
-        "return index of array *at or below* value "
-        if value < min(arrval):  return 0
-        return max(np.where(arrval<=value)[0])
+	"return index of array *at or below* value "
+	if value < min(arrval):  return 0
+	return max(np.where(arrval<=value)[0])
 
     ix1 = index_of(x,  75)
     ix2 = index_of(x, 135)
@@ -938,42 +932,39 @@ giving identical values (to the precision printed out in the report),
 but in fewer steps, and without any bounds on parameters at all::
 
     [[Model]]
-        ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) +  Model(exponential,  prefix='exp_'))
+	((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) +  Model(exponential,  prefix='exp_'))
     [[Fit Statistics]]
-        # function evals   = 48
-        # data points      = 250
-        # variables        = 8
-        chi-square         = 1247.528
-        reduced chi-square = 5.155
-        Akaike info crit   = 425.995
-        Bayesian info crit = 454.167
+	# function evals   = 48
+	# data points      = 250
+	# variables        = 8
+	chi-square         = 1247.528
+	reduced chi-square = 5.155
+	Akaike info crit   = 417.865
+	Bayesian info crit = 446.036
     [[Variables]]
-        exp_amplitude:   99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
-        exp_decay:       90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
-        g1_amplitude:    4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
-        g1_sigma:        16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
-        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 106.5)
-        g1_fwhm:         39.2609141 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
-        g1_height:       101.880231 +/- 0.592171 (0.58%)  ==
-        '0.3989423*g1_amplitude/g1_    sigma'
-        g2_amplitude:    2493.41766 +/- 36.16947 (1.45%) (init= 1878.892)
-        g2_sigma:        13.8069481 +/- 0.186794 (1.35%) (init= 15)
-        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 150)
-        g2_fwhm:         32.5128777 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
-        g2_height:       72.0455935 +/- 0.617221 (0.86%)  ==
-        '0.3989423*g2_amplitude/g2_    sigma'
+	exp_amplitude:   99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+	exp_decay:       90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
+	g1_sigma:        16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+	g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+	g1_amplitude:    4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
+	g1_fwhm:         39.2609141 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
+	g1_height:       101.880231 +/- 0.592171 (0.58%)  == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
+	g2_sigma:        13.8069481 +/- 0.186794 (1.35%) (init= 15)
+	g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 150)
+	g2_amplitude:    2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
+	g2_fwhm:         32.5128777 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
+	g2_height:       72.0455935 +/- 0.617221 (0.86%)  == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
     [[Correlations]] (unreported correlations are <  0.500)
-        C(g1_amplitude, g1_sigma)    =  0.824
-        C(g2_amplitude, g2_sigma)    =  0.815
-        C(exp_amplitude, exp_decay)  = -0.695
-        C(g1_sigma, g2_center)       =  0.684
-        C(g1_center, g2_amplitude)   = -0.669
-        C(g1_center, g2_sigma)       = -0.652
-        C(g1_amplitude, g2_center)   =  0.648
-        C(g1_center, g2_center)      =  0.621
-        C(g1_sigma, g1_center)       =  0.507
-        C(exp_decay, g1_amplitude)   = -0.507
-
+	C(g1_sigma, g1_amplitude)    =  0.824
+	C(g2_sigma, g2_amplitude)    =  0.815
+	C(exp_amplitude, exp_decay)  = -0.695
+	C(g1_sigma, g2_center)       =  0.684
+	C(g1_center, g2_amplitude)   = -0.669
+	C(g1_center, g2_sigma)       = -0.652
+	C(g1_amplitude, g2_center)   =  0.648
+	C(g1_center, g2_center)      =  0.621
+	C(g1_sigma, g1_center)       =  0.507
+	C(exp_decay, g1_amplitude)   = -0.507
 
 This script is in the file ``doc_nistgauss2.py`` in the examples folder,
 and the fit result shown on the right above shows an improved initial
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 67c4f97..54d2970 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -3,7 +3,7 @@
 Calculation of confidence intervals
 ====================================
 
-.. module:: confidence
+.. module:: lmfit.confidence
 
 The lmfit :mod:`confidence` module allows you to explicitly calculate
 confidence intervals for variable parameters.  For most models, it is not
@@ -63,17 +63,17 @@ starting point::
     >>> result = mini.minimize()
     >>> print(lmfit.fit_report(result.params))
     [Variables]]
-        a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
-        b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
+	a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+	b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(a, b)                      =  0.601
+	C(a, b)                      =  0.601
 
 Now it is just a simple function call to calculate the confidence
 intervals::
 
     >>> ci = lmfit.conf_interval(mini, result)
     >>> lmfit.printfuncs.report_ci(ci)
-         99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
+	 99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
     a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
     b   1.94751   1.96049   1.97274   1.97741   1.99680   2.00905   2.02203
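
Pieced together, the calls behind this example form a short workflow -- a
sketch, with the objective function ``residual`` and its ``params`` assumed
from the surrounding text::

    import lmfit

    # wrap the objective and parameters, then run the fit
    mini = lmfit.Minimizer(residual, params)
    result = mini.minimize()

    # explicit confidence intervals, beyond the quoted standard errors
    ci = lmfit.conf_interval(mini, result)
    lmfit.printfuncs.report_ci(ci)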
 
@@ -103,16 +103,16 @@ uncertainties and correlations
 which will report::
 
     [[Variables]]
-        a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
-        a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
-        t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
-        t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+	a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+	a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+	t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+	t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
     [[Correlations]] (unreported correlations are <  0.500)
-        C(a2, t2)                    =  0.987
-        C(a2, t1)                    = -0.925
-        C(t1, t2)                    = -0.881
-        C(a1, t1)                    = -0.599
-          95.00%    68.00%     0.00%    68.00%    95.00%
+	C(a2, t2)                    =  0.987
+	C(a2, t1)                    = -0.925
+	C(t1, t2)                    = -0.881
+	C(a1, t1)                    = -0.599
+	  95.00%    68.00%     0.00%    68.00%    95.00%
     a1   2.71850   2.84525   2.98622   3.14874   3.34076
     a2  -4.63180  -4.46663  -4.33526  -4.22883  -4.14178
     t2  10.82699  11.33865  11.82404  12.28195  12.71094
diff --git a/doc/extensions.pyc b/doc/extensions.pyc
index 2ea9e41..38be3f3 100644
Binary files a/doc/extensions.pyc and b/doc/extensions.pyc differ
diff --git a/doc/faq.rst b/doc/faq.rst
index 1bf26f9..f21a7a2 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -56,8 +56,8 @@ example, here is a residual function to simultaneously fit two lines to two
 different arrays.  As a bonus, the two lines share the 'offset' parameter::
 
     def fit_function(params, x=None, dat1=None, dat2=None):
-        model1 = params['offset'].value + x * params['slope1'].value
-        model2 = params['offset'].value + x * params['slope2'].value
+        model1 = params['offset'] + x * params['slope1']
+        model2 = params['offset'] + x * params['slope2']
 
 	resid1 = dat1 - model1
         resid2 = dat2 - model2
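
The residual above would typically finish by concatenating the two arrays,
so that both datasets are fit at once. A self-contained sketch with
synthetic data::

    import numpy as np
    from lmfit import Parameters, minimize

    def fit_function(params, x=None, dat1=None, dat2=None):
        # both models share 'offset'; each dataset gets its own slope
        model1 = params['offset'] + x * params['slope1']
        model2 = params['offset'] + x * params['slope2']
        resid1 = dat1 - model1
        resid2 = dat2 - model2
        # one flat residual array covering both datasets
        return np.concatenate((resid1, resid2))

    x = np.linspace(0, 10, 51)
    dat1 = 1.0 + 2.0*x + np.random.normal(scale=0.1, size=x.size)
    dat2 = 1.0 + 3.5*x + np.random.normal(scale=0.1, size=x.size)

    params = Parameters()
    params.add('offset', value=0.5)
    params.add('slope1', value=1.0)
    params.add('slope2', value=1.0)

    out = minimize(fit_function, params, kws={'x': x, 'dat1': dat1, 'dat2': dat2})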
diff --git a/doc/fitting.rst b/doc/fitting.rst
index cde5d89..0783ade 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -1,5 +1,7 @@
 .. _minimize_chapter:
 
+.. module:: lmfit.minimizer
+
 =======================================
 Performing Fits, Analyzing Outputs
 =======================================
@@ -15,7 +17,7 @@ when using complicated constraints or comparing results from related fits.
 
 
 The :func:`minimize` function
-===============================
+=============================
 
 The :func:`minimize` function is a wrapper around :class:`Minimizer` for
 running an optimization problem.  It takes an objective function (the
@@ -23,53 +25,11 @@ function that calculates the array to be minimized), a :class:`Parameters`
 object, and several optional arguments.  See :ref:`fit-func-label` for
 details on writing the objective.
 
-.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **fit_kws]]]]]])
-
-   find values for the ``params`` so that the sum-of-squares of the array returned
-   from ``function`` is minimized.
-
-   :param function:  function to return fit residual.  See :ref:`fit-func-label` for details.
-   :type  function:  callable.
-   :param params:  a :class:`Parameters` dictionary.  Keywords must be strings
-                   that match ``[a-z_][a-z0-9_]*`` and cannot be a python
-                   reserved word.  Each value must be :class:`Parameter`.
-   :type  params:  :class:`Parameters`.
-   :param args:  arguments tuple to pass to the residual function as  positional arguments.
-   :type  args:  tuple
-   :param kws:   dictionary to pass to the residual function as keyword arguments.
-   :type  kws:  dict
-   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
-   :type  method:  string (default ``leastsq``)
-   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
-   :type  scale_covar:  bool (default ``True``)
-   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
-   :type  iter_cb:  callable or ``None``
-   :param fit_kws:  dictionary to pass to :scipydoc:`optimize.leastsq` or :scipydoc:`optimize.minimize`.
-   :type  fit_kws:  dict
-
-   :return: :class:`MinimizerResult` instance, which will contain the
-            optimized parameter, and several goodness-of-fit statistics.
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-
-   On output, the params will be unchanged.  The best-fit values, and where
-   appropriate, estimated uncertainties and correlations, will all be
-   contained in the returned :class:`MinimizerResult`.  See
-   :ref:`fit-results-label` for further details.
-
-   For clarity, it should be emphasized that this function is simply a
-   wrapper around :class:`Minimizer` that runs a single fit, implemented as::
-
-    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
-                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
-    return fitter.minimize(method=method)
+.. autofunction:: minimize
 
 
 ..  _fit-func-label:
 
-
 Writing a Fitting Function
 ===============================
 
@@ -110,26 +70,26 @@ simple way to do this is with :meth:`Parameters.valuesdict`, as with::
 
 
     def residual(pars, x, data=None, eps=None):
-        # unpack parameters:
-        #  extract .value attribute for each parameter
+	# unpack parameters:
+	#  extract .value attribute for each parameter
 	parvals = pars.valuesdict()
-        period = parvals['period']
-        shift = parvals['shift']
-        decay = parvals['decay']
+	period = parvals['period']
+	shift = parvals['shift']
+	decay = parvals['decay']
 
-        if abs(shift) > pi/2:
-            shift = shift - sign(shift)*pi
+	if abs(shift) > pi/2:
+	    shift = shift - sign(shift)*pi
 
-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
+	if abs(period) < 1.e-10:
+	    period = sign(period)*1.e-10
 
-        model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
+	model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
 
-        if data is None:
-            return model
+	if data is None:
+	    return model
 	if eps is None:
-            return (model - data)
-        return (model - data)/eps
+	    return (model - data)
+	return (model - data)/eps
 
 In this example, ``x`` is a positional (required) argument, while the
 ``data`` array is actually optional (so that the function returns the model
@@ -142,8 +102,8 @@ to use the bounds on the :class:`Parameter` to do this::
 
 but putting this directly in the function with::
 
-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
+	if abs(period) < 1.e-10:
+	    period = sign(period)*1.e-10
 
 is also a reasonable approach.   Similarly, one could place bounds on the
 ``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.
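
Written as :class:`Parameter` bounds rather than in-function clipping, that
advice looks roughly like this sketch (starting values are illustrative)::

    import numpy as np
    from lmfit import Parameters

    params = Parameters()
    params.add('amp', value=10.0)
    # keep 'period' away from zero instead of clipping it in the residual
    params.add('period', value=5.0, min=1.e-10)
    params.add('shift', value=0.0, min=-np.pi/2, max=np.pi/2)
    params.add('decay', value=0.02, min=-np.pi/2, max=np.pi/2)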
@@ -173,7 +133,7 @@ class as listed in the :ref:`Table of Supported Fitting Methods
  +-----------------------+------------------------------------------------------------------+
  | Fitting Method        | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` |
  +=======================+==================================================================+
- | Levenberg-Marquardt   |  ``leastsq``                                                     |
+ | Levenberg-Marquardt   |  ``leastsq`` or ``least_squares``                                |
  +-----------------------+------------------------------------------------------------------+
  | Nelder-Mead           |  ``nelder``                                                      |
  +-----------------------+------------------------------------------------------------------+
@@ -218,10 +178,6 @@ class as listed in the :ref:`Table of Supported Fitting Methods
 :class:`MinimizerResult` -- the optimization result
 ========================================================
 
-
-
-.. class:: MinimizerResult(**kws)
-
 .. versionadded:: 0.9.0
 
 An optimization with :func:`minimize` or :meth:`Minimizer.minimize`
 will not be changed.  To find the best-fit values, uncertainties
 and so on for each parameter, one must use the
 :attr:`MinimizerResult.params` attribute.
 
-.. attribute::   params
-
-  the :class:`Parameters` actually used in the fit, with updated
-  values, :attr:`stderr` and :attr:`correl`.
-
-.. attribute::  var_names
-
-  ordered list of variable parameter names used in optimization, and
-  useful for understanding the the values in :attr:`init_vals` and
-  :attr:`covar`.
-
-.. attribute:: covar
-
-  covariance matrix from minimization (`leastsq` only), with
-  rows/columns using :attr:`var_names`.
-
-.. attribute:: init_vals
-
-  list of initial values for variable parameters using :attr:`var_names`.
-
-.. attribute::  nfev
-
-  number of function evaluations
-
-.. attribute::  success
-
-  boolean (``True``/``False``) for whether fit succeeded.
-
-.. attribute::  errorbars
-
-  boolean (``True``/``False``) for whether uncertainties were
-  estimated.
-
-.. attribute::  message
-
-  message about fit success.
-
-.. attribute::  ier
-
-  integer error value from :scipydoc:`optimize.leastsq`  (`leastsq`  only).
-
-.. attribute::  lmdif_message
-
-  message from :scipydoc:`optimize.leastsq` (`leastsq` only).
-
-.. attribute::  nvarys
-
-  number of variables in fit  :math:`N_{\rm varys}`
-
-.. attribute::  ndata
-
-  number of data points:  :math:`N`
-
-.. attribute::  nfree `
-
-  degrees of freedom in fit:  :math:`N - N_{\rm varys}`
-
-.. attribute::  residual
-
-  residual array, return value of :func:`func`:  :math:`{\rm Resid}`
-
-.. attribute::  chisqr
-
-  chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`
-
-.. attribute::  redchi
-
-  reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm
-  varys})}`
-
-.. attribute::  aic
-
-  Akaike Information Criterion statistic (see below)
-
-.. attribute::  bic
-
-  Bayesian Information Criterion statistic (see below).
+.. autoclass:: MinimizerResult
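
In use, the separation between input parameters and results reads like this
sketch (``residual``, ``params``, and the data arguments are assumed, and
the parameter name follows the earlier example)::

    from lmfit import minimize

    result = minimize(residual, params, args=(x,), kws={'data': data})

    # best-fit values live on result.params; the input params are untouched
    print(result.params['period'].value, result.params['period'].stderr)
    print(result.nfev, result.chisqr, result.redchi, result.aic, result.bic)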
 
 
 
@@ -334,9 +214,9 @@ Goodness-of-Fit Statistics
 +----------------------+----------------------------------------------------------------------------+
 |    ndata             | number of data points:  :math:`N`                                          |
 +----------------------+----------------------------------------------------------------------------+
-|    nfree `           | degrees of freedom in fit:  :math:`N - N_{\rm varys}`                      |
+|    nfree             | degrees of freedom in fit:  :math:`N - N_{\rm varys}`                      |
 +----------------------+----------------------------------------------------------------------------+
-|    residual          | residual array, return value of :func:`func`:  :math:`{\rm Resid}`         |
+|    residual          | residual array, returned by the objective function: :math:`\{\rm Resid_i\}`|
 +----------------------+----------------------------------------------------------------------------+
 |    chisqr            | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`                    |
 +----------------------+----------------------------------------------------------------------------+
@@ -388,7 +268,7 @@ The :class:`MinimizerResult` includes the traditional chi-square and reduced chi
    :nowrap:
 
    \begin{eqnarray*}
-        \chi^2  &=&  \sum_i^N r_i^2 \\
+	\chi^2  &=&  \sum_i^N r_i^2 \\
	\chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
     \end{eqnarray*}
 
@@ -455,9 +335,6 @@ exception is raised in the iteration callback. When a fit is aborted this
 way, the parameters will have the values from the last iteration.  The fit
 statistics are not likely to be meaningful, and uncertainties will not be computed.
 
-
-.. module:: Minimizer
-
 ..  _fit-minimizer-label:
 
 Using the :class:`Minimizer` class
@@ -466,15 +343,15 @@ Using the :class:`Minimizer` class
 For full control of the fitting process, you'll want to create a
 :class:`Minimizer` object.
 
-.. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, **kws)
+.. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, mask_non_finite=False, **kws)
 
    creates a Minimizer, for more detailed access to fitting methods and attributes.
 
    :param function:  objective function to return fit residual.  See :ref:`fit-func-label` for details.
    :type  function:  callable.
    :param params:  a dictionary of Parameters.  Keywords must be strings
-                   that match ``[a-z_][a-z0-9_]*`` and is not a python
-                   reserved word.  Each value must be :class:`Parameter`.
+		   that match ``[a-z_][a-z0-9_]*`` and is not a python
+		   reserved word.  Each value must be :class:`Parameter`.
    :type  params:  dict
    :param fcn_args:  arguments tuple to pass to the residual function as  positional arguments.
    :type  fcn_args: tuple
@@ -484,30 +361,20 @@ For full control of the fitting process, you'll want to create a
    :type  iter_cb:  callable or ``None``
    :param scale_covar:  flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
    :type  scale_covar:  bool (default ``True``).
+   :param nan_policy: Specifies action if `userfcn` (or a Jacobian) returns nan
+    values. One of:
+
+		'raise' - a `ValueError` is raised
+		'propagate' - the values returned from `userfcn` are un-altered
+		'omit' - the non-finite values are filtered.
+
+   :type  nan_policy: str (default 'raise')
    :param kws:      dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
    :type  kws:      dict
 
 The Minimizer object has a few public methods:
 
-.. method:: minimize(method='leastsq', params=None, **kws)
-
-   perform fit using either :meth:`leastsq` or :meth:`scalar_minimize`.
-
-   :param method: name of fitting method.  Must be one of the names in
-                  :ref:`Table of Supported Fitting Methods <fit-methods-table>`
-   :type  method:  str.
-   :param params:  a :class:`Parameters` dictionary for starting values
-   :type  params:  :class:`Parameters` or `None`
-
-   :return: :class:`MinimizerResult` object, containing updated
-            parameters, fitting statistics, and information.
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-   Additional keywords are passed on to the correspond :meth:`leastsq`
-   or :meth:`scalar_minimize` method.
-
+.. automethod:: Minimizer.minimize
 
 .. method:: leastsq(params=None, scale_covar=True, **kws)
 
@@ -578,15 +445,15 @@ The Minimizer object has a few public methods:
   :param params: a :class:`Parameters` dictionary for starting values
   :type  params: :class:`Parameters` or `None`
   :param steps: How many samples you would like to draw from the posterior
-                 distribution for each of the walkers?
+		 distribution for each of the walkers?
   :type  steps: int
   :param nwalkers: Should be set so :math:`nwalkers >> nvarys`, where `nvarys`
-                    are the number of parameters being varied during the fit.
-                    "Walkers are the members of the ensemble. They are almost
-                    like separate Metropolis-Hastings chains but, of course,
-                    the proposal distribution for a given walker depends on the
-                    positions of all the other walkers in the ensemble." - from
-                    [1]_.
+		    are the number of parameters being varied during the fit.
+		    "Walkers are the members of the ensemble. They are almost
+		    like separate Metropolis-Hastings chains but, of course,
+		    the proposal distribution for a given walker depends on the
+		    positions of all the other walkers in the ensemble." - from
+		    [1]_.
   :type  nwalkers: int
   :param burn: Discard this many samples from the start of the sampling regime.
   :type  burn: int
@@ -595,33 +462,33 @@ The Minimizer object has a few public methods:
   :param ntemps: If `ntemps > 1` perform a Parallel Tempering.
   :type ntemps: int
   :param pos: Specify the initial positions for the sampler.  If `ntemps == 1`
-              then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
-              `(ntemps, nwalkers, nvarys)`. You can also initialise using a
-              previous chain that had the same `ntemps`, `nwalkers` and `nvarys`.
+	      then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
+	      `(ntemps, nwalkers, nvarys)`. You can also initialise using a
+	      previous chain that had the same `ntemps`, `nwalkers` and `nvarys`.
   :type pos: np.ndarray
   :param reuse_sampler:  If you have already run :meth:`emcee` on a given
-            :class:`Minimizer` object then it possesses an internal sampler
-            attribute. You can continue to draw from the same sampler (retaining
-            the chain history) if you set this option to `True`. Otherwise a new
-            sampler is created. The `nwalkers`, `ntemps` and `params` keywords
-            are ignored with this option.
-            **Important**: the :class:`Parameters` used to create the sampler
-            must not change in-between calls to :meth:`emcee`. Alteration of
-            :class:`Parameters` would include changed ``min``, ``max``,
-            ``vary`` and ``expr`` attributes. This may happen, for example, if
-            you use an altered :class:`Parameters` object and call the
-            :meth:`minimize` method in-between calls to :meth:`emcee` .
+	    :class:`Minimizer` object then it possesses an internal sampler
+	    attribute. You can continue to draw from the same sampler (retaining
+	    the chain history) if you set this option to `True`. Otherwise a new
+	    sampler is created. The `nwalkers`, `ntemps` and `params` keywords
+	    are ignored with this option.
+	    **Important**: the :class:`Parameters` used to create the sampler
+	    must not change in-between calls to :meth:`emcee`. Alteration of
+	    :class:`Parameters` would include changed ``min``, ``max``,
+	    ``vary`` and ``expr`` attributes. This may happen, for example, if
+	    you use an altered :class:`Parameters` object and call the
+	    :meth:`minimize` method in-between calls to :meth:`emcee` .
   :type  reuse_sampler:  bool
   :param workers: For parallelization of sampling.  It can be any Pool-like object
-            with a map method that follows the same calling sequence as the
-            built-in map function. If int is given as the argument, then a
-            multiprocessing-based pool is spawned internally with the
-            corresponding number of parallel processes. 'mpi4py'-based
-            parallelization and 'joblib'-based parallelization pools can also
-            be used here. **Note**: because of multiprocessing overhead it may
-            only be worth parallelising if the objective function is expensive
-            to calculate, or if there are a large number of objective
-            evaluations per step (`ntemps * nwalkers * nvarys`).
+	    with a map method that follows the same calling sequence as the
+	    built-in map function. If int is given as the argument, then a
+	    multiprocessing-based pool is spawned internally with the
+	    corresponding number of parallel processes. 'mpi4py'-based
+	    parallelization and 'joblib'-based parallelization pools can also
+	    be used here. **Note**: because of multiprocessing overhead it may
+	    only be worth parallelising if the objective function is expensive
+	    to calculate, or if there are a large number of objective
+	    evaluations per step (`ntemps * nwalkers * nvarys`).
   :type workers: int or Pool-like
   :type float_behavior: str
   :param float_behavior: Specifies the meaning of the objective function if it
@@ -633,40 +500,40 @@ The Minimizer object has a few public methods:
 
    See Notes for further details.
   :param is_weighted: Has your objective function been weighted by measurement
-            uncertainties? If `is_weighted is True` then your objective
-            function is assumed to return residuals that have been divided by
-            the true measurement uncertainty `(data - model) / sigma`. If
-            `is_weighted is False` then the objective function is assumed to
-            return unweighted residuals, `data - model`. In this case `emcee`
-            will employ a positive measurement uncertainty during the sampling.
-            This measurement uncertainty will be present in the output params
-            and output chain with the name `__lnsigma`. A side effect of this
-            is that you cannot use this parameter name yourself.
-            **Important** this parameter only has any effect if your objective
-            function returns an array. If your objective function returns a
-            float, then this parameter is ignored. See Notes for more details.
+	    uncertainties? If `is_weighted is True` then your objective
+	    function is assumed to return residuals that have been divided by
+	    the true measurement uncertainty `(data - model) / sigma`. If
+	    `is_weighted is False` then the objective function is assumed to
+	    return unweighted residuals, `data - model`. In this case `emcee`
+	    will employ a positive measurement uncertainty during the sampling.
+	    This measurement uncertainty will be present in the output params
+	    and output chain with the name `__lnsigma`. A side effect of this
+	    is that you cannot use this parameter name yourself.
+	    **Important** this parameter only has any effect if your objective
+	    function returns an array. If your objective function returns a
+	    float, then this parameter is ignored. See Notes for more details.
   :type is_weighted: bool
   :param seed: If `seed` is an int, a new `np.random.RandomState` instance is used,
-            seeded with `seed`.
-            If `seed` is already a `np.random.RandomState` instance, then that
-            `np.random.RandomState` instance is used.
-            Specify `seed` for repeatable sampling.
+	    seeded with `seed`.
+	    If `seed` is already a `np.random.RandomState` instance, then that
+	    `np.random.RandomState` instance is used.
+	    Specify `seed` for repeatable sampling.
   :type seed: int or np.random.RandomState
 
   :return: :class:`MinimizerResult` object containing updated params, statistics,
-            etc. The :class:`MinimizerResult` also contains the ``chain``,
-            ``flatchain`` and ``lnprob`` attributes. The ``chain``
-            and ``flatchain`` attributes contain the samples and have the shape
-            `(nwalkers, (steps - burn) // thin, nvarys)` or
-            `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
-            depending on whether Parallel tempering was used or not.
-            `nvarys` is the number of parameters that are allowed to vary.
-            The ``flatchain`` attribute is a :class:`pandas.DataFrame` of the
-            flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
-            chain values for a particular parameter use
-            `result.flatchain[parname]`. The ``lnprob`` attribute contains the
-            log probability for each sample in ``chain``. The sample with the
-            highest probability corresponds to the maximum likelihood estimate.
+	    etc. The :class:`MinimizerResult` also contains the ``chain``,
+	    ``flatchain`` and ``lnprob`` attributes. The ``chain``
+	    and ``flatchain`` attributes contain the samples and have the shape
+	    `(nwalkers, (steps - burn) // thin, nvarys)` or
+	    `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
+	    depending on whether Parallel tempering was used or not.
+	    `nvarys` is the number of parameters that are allowed to vary.
+	    The ``flatchain`` attribute is a :class:`pandas.DataFrame` of the
+	    flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
+	    chain values for a particular parameter use
+	    `result.flatchain[parname]`. The ``lnprob`` attribute contains the
+	    log probability for each sample in ``chain``. The sample with the
+	    highest probability corresponds to the maximum likelihood estimate.
 
   This method samples the posterior distribution of the parameters using
   Markov Chain Monte Carlo.  To do so it needs to calculate the
@@ -760,10 +627,10 @@ Solving with :func:`minimize` gives the Maximum Likelihood solution.::
     >>> mi = lmfit.minimize(residual, p, method='Nelder')
     >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
     [[Variables]]
-        a1:   2.98623688 (init= 4)
-        a2:  -4.33525596 (init= 4)
-        t1:   1.30993185 (init= 3)
-        t2:   11.8240752 (init= 3)
+	a1:   2.98623688 (init= 4)
+	a2:  -4.33525596 (init= 4)
+	t1:   1.30993185 (init= 3)
+	t2:   11.8240752 (init= 3)
     [[Correlations]] (unreported correlations are <  0.500)
     >>> plt.plot(x, y)
     >>> plt.plot(x, residual(mi.params) + y, 'r')
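
The posterior summary below comes from an :meth:`emcee` run along these
lines -- a sketch: the keyword values are illustrative, and ``lnprob``
stands for a user-defined log-probability function that folds in the noise
parameter ``f`` reported below::

    import lmfit

    # add a noise parameter, then sample the posterior with emcee
    mi.params.add('f', value=1, min=0.001, max=2)
    mini = lmfit.Minimizer(lnprob, mi.params)
    res = mini.emcee(burn=300, steps=1000, thin=20, params=mi.params)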
@@ -817,18 +684,18 @@ You can see that we recovered the right uncertainty level on the data.::
     median of posterior probability distribution
     ------------------------------------------
     [[Variables]]
-        a1:   3.00975345 +/- 0.151034 (5.02%) (init= 2.986237)
-        a2:  -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256)
-        t1:   1.32726415 +/- 0.142995 (10.77%) (init= 1.309932)
-        t2:   11.7911935 +/- 0.495583 (4.20%) (init= 11.82408)
-        f:    0.09805494 +/- 0.004256 (4.34%) (init= 1)
+	a1:   3.00975345 +/- 0.151034 (5.02%) (init= 2.986237)
+	a2:  -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256)
+	t1:   1.32726415 +/- 0.142995 (10.77%) (init= 1.309932)
+	t2:   11.7911935 +/- 0.495583 (4.20%) (init= 11.82408)
+	f:    0.09805494 +/- 0.004256 (4.34%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(a2, t2)                    =  0.981
-        C(a2, t1)                    = -0.927
-        C(t1, t2)                    = -0.880
-        C(a1, t1)                    = -0.519
-        C(a1, a2)                    =  0.195
-        C(a1, t2)                    =  0.146
+	C(a2, t2)                    =  0.981
+	C(a2, t1)                    = -0.927
+	C(t1, t2)                    = -0.880
+	C(a1, t1)                    = -0.519
+	C(a1, a2)                    =  0.195
+	C(a1, t2)                    =  0.146
 
     >>> # find the maximum likelihood solution
     >>> highest_prob = np.argmax(res.lnprob)
@@ -855,6 +722,8 @@ You can see that we recovered the right uncertainty level on the data.::
 Getting and Printing Fit Reports
 ===========================================
 
+.. currentmodule:: lmfit.printfuncs
+
 .. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1)
 
    generate and return text of report of best-fit values, uncertainties,
@@ -879,22 +748,22 @@ An example fit with report would be
 which would write out::
 
     [[Fit Statistics]]
-        # function evals   = 85
-        # data points      = 1001
-        # variables        = 4
-        chi-square         = 498.812
-        reduced chi-square = 0.500
-        Akaike info crit   = -685.215
-        Bayesian info crit = -665.579
+	# function evals   = 85
+	# data points      = 1001
+	# variables        = 4
+	chi-square         = 498.812
+	reduced chi-square = 0.500
+	Akaike info crit   = -689.223
+	Bayesian info crit = -669.587
     [[Variables]]
-        amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
-        period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
-        shift:    0.16203676 +/- 0.014056 (8.67%) (init= 0)
-        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
+	amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
+	period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
+	shift:    0.16203676 +/- 0.014056 (8.67%) (init= 0)
+	decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(period, shift)             =  0.797
-        C(amp, decay)                =  0.582
-        C(amp, shift)                = -0.297
-        C(amp, period)               = -0.243
-        C(shift, decay)              = -0.182
-        C(period, decay)             = -0.150
+	C(period, shift)             =  0.797
+	C(amp, decay)                =  0.582
+	C(amp, shift)                = -0.297
+	C(amp, period)               = -0.243
+	C(shift, decay)              = -0.182
+	C(period, decay)             = -0.150
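
Producing a report in this format takes a single call -- a sketch, where
``result`` is any :class:`MinimizerResult`::

    from lmfit import fit_report

    # text summary of fit statistics, best-fit values, and correlations
    print(fit_report(result, show_correl=True, min_correl=0.1))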
diff --git a/doc/index.rst b/doc/index.rst
index 5a2205a..0469958 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -6,12 +6,6 @@ Non-Linear Least-Square Minimization and Curve-Fitting for Python
 .. _Levenberg-Marquardt:     http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
 .. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
 
-
-.. warning::
-
-  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
-
-
 Lmfit provides a high-level interface to non-linear optimization and curve
 fitting problems for Python. Lmfit builds on and extends many of the
 optimization algorithm of :mod:`scipy.optimize`, especially the
@@ -47,8 +41,8 @@ fitting problems, including:
 
 .. _lmfit github repository:   http://github.com/lmfit/lmfit-py
 
-The lmfit package is Free software, using an MIT license.  The software and
-this document are works in progress.  If you are interested in
+The lmfit package is Free software, using an Open Source license.  The
+software and this document are works in progress.  If you are interested in
 participating in this effort please use the `lmfit github repository`_.
 
 
@@ -66,3 +60,4 @@ participating in this effort please use the `lmfit github repository`_.
    confidence
    bounds
    constraints
+   whatsnew
diff --git a/doc/installation.rst b/doc/installation.rst
index f9adadd..61e488e 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -10,37 +10,41 @@ Downloading and Installation
 Prerequisites
 ~~~~~~~~~~~~~~~
 
-The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
-higher is recommended, but extensive testing on compatibility with various
-versions of scipy has not been done.  Lmfit works with Python 2.7, 3.3 and
-3.4.  No testing has been done with Python 3.5, but as the package is pure
-Python, relying only on scipy and numpy, no significant troubles are
-expected.  The `nose`_ framework is required for running the test suite,
-and IPython and matplotib are recommended.  If Pandas is available, it will
-be used in portions of lmfit.
+The lmfit package requires Python, Numpy, and Scipy.
+
+Lmfit works with Python 2.7, 3.3, 3.4, and 3.5. Support for Python 2.6
+ended with lmfit version 0.9.5.  Scipy version 0.14 or higher is required,
+with 0.17 or higher recommended to be able to use the latest optimization
+features from scipy.  Numpy version 1.5 or higher is required.
+
+In order to run the test suite, the `nose`_ framework is required.  Some
+parts of lmfit will be able to make use of IPython (version 4 or higher),
+matplotlib, and pandas if those libraries are installed, but no core
+functionality of lmfit requires these.
 
 
 Downloads
 ~~~~~~~~~~~~~
 
-
-The latest stable version of lmfit is  available from `PyPi <http://pypi.python.org/pypi/lmfit/>`_.
+The latest stable version of lmfit, |release|, is available from `PyPi
+<http://pypi.python.org/pypi/lmfit/>`_.
 
 Installation
 ~~~~~~~~~~~~~~~~~
 
-If you have `pip`_  installed, you can install lmfit with::
+If you have `pip`_ installed, you can install lmfit with::
 
     pip install lmfit
 
-or, if  you have `Python Setup Tools`_  installed, you install lmfit with::
-
-   easy_install -U lmfit
+or you can download the source kit, unpack it and install with::
 
+   python setup.py install
 
-or, you can download the source kit, unpack it and install with::
+For Anaconda Python, lmfit is not an official package, but several
+Anaconda channels provide it, allowing installation with (for example)::
 
-   python setup.py install
+   conda install -c conda-forge lmfit
+   conda install -c newville lmfit
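+
+To check that the installation worked, you can print the installed
+version from Python, for example::
+
+   import lmfit
+   print(lmfit.__version__)    # for this release, prints 0.9.5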
 
 
 Development Version
@@ -50,7 +54,6 @@ To get the latest development version, use::
 
    git clone http://github.com/lmfit/lmfit-py.git
 
-
 and install using::
 
    python setup.py install
diff --git a/doc/intro.rst b/doc/intro.rst
index bc854f4..1c6271d 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -97,10 +97,10 @@ as::
     from lmfit import minimize, Parameters
 
     def residual(params, x, data, eps_data):
-        amp = params['amp'].value
-        pshift = params['phase'].value
-	freq = params['frequency'].value
-        decay = params['decay'].value
+        amp = params['amp']
+        pshift = params['phase']
+	freq = params['frequency']
+        decay = params['decay']
 
 	model = amp * sin(x * freq  + pshift) * exp(-x*x*decay)
 
diff --git a/doc/model.rst b/doc/model.rst
index 9b0b595..08a3876 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -4,7 +4,7 @@
 Modeling Data and Curve Fitting
 =================================================
 
-.. module:: model
+.. module:: lmfit.model
 
 A common use of least-squares minimization is *curve fitting*, where one
 has a parametrized model function meant to explain some phenomena and wants
@@ -146,21 +146,21 @@ a :class:`ModelResult` object.  As we will see below, this has many
 components, including a :meth:`fit_report` method, which will show::
 
     [[Model]]
-        gaussian
+	gaussian
     [[Fit Statistics]]
-        # function evals   = 33
-        # data points      = 101
-        # variables        = 3
-        chi-square         = 3.409
-        reduced chi-square = 0.035
-        Akaike info crit   = -333.218
-        Bayesian info crit = -325.373
+	# function evals   = 33
+	# data points      = 101
+	# variables        = 3
+	chi-square         = 3.409
+	reduced chi-square = 0.035
+	Akaike info crit   = -336.264
+	Bayesian info crit = -328.418
     [[Variables]]
-        amp:   8.88021829 +/- 0.113594 (1.28%) (init= 5)
-        cen:   5.65866102 +/- 0.010304 (0.18%) (init= 5)
-        wid:   0.69765468 +/- 0.010304 (1.48%) (init= 1)
+	amp:   8.88021829 +/- 0.113594 (1.28%) (init= 5)
+	cen:   5.65866102 +/- 0.010304 (0.18%) (init= 5)
+	wid:   0.69765468 +/- 0.010304 (1.48%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(amp, wid)                  =  0.577
+	C(amp, wid)                  =  0.577
 
 The result will also have :attr:`init_fit` for the fit with the initial
 parameter values and a :attr:`best_fit` for the fit with the best fit
@@ -348,9 +348,9 @@ specifying one or more independent variables.
     * ``None``: Do not check for null or missing values (default)
     * ``'none'``: Do not check for null or missing values.
     * ``'drop'``: Drop null or missing observations in data.  If pandas is
-                installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used.
+		installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used.
     * ``'raise'``: Raise a (more helpful) exception when data contains null
-                  or missing values.
+		  or missing values.
 
 .. attribute:: name
 
@@ -667,11 +667,12 @@ including `chisqr`, `redchi`, `aic`, and `bic`.
 These methods are all inherited from :class:`Minimize` or from
 :class:`Model`.
 
-.. method:: ModelResult.eval(**kwargs)
+.. method:: ModelResult.eval(params=None, **kwargs)
 
-   evaluate the model using the best-fit parameters and supplied
-   independent variables.  The ``**kwargs`` arguments can be used to update
-   parameter values and/or independent variables.
+   evaluate the model using parameters supplied (or the best-fit parameters
+   if not specified) and supplied independent variables.  The ``**kwargs``
+   arguments can be used to update parameter values and/or independent
+   variables.
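+
+   For example, to evaluate the best-fit model on a finer grid (a minimal
+   sketch, assuming a completed fit ``result`` and a numpy array ``xfine``)::
+
+       yfine = result.eval(x=xfine)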
 
 
 .. method:: ModelResult.eval_components(**kwargs)
@@ -975,11 +976,11 @@ to model a peak with a background. For such a simple problem, we could just
 build a model that included both components::
 
     def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
-        "line + 1-d gaussian"
+	"line + 1-d gaussian"
 
-        gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
-        line = slope * x + intercept
-        return gauss + line
+	gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+	line = slope * x + intercept
+	return gauss + line
 
 and use that with::
 
@@ -991,8 +992,8 @@ model function would have to be changed.  As an alternative we could define
 a linear function::
 
     def line(x, slope, intercept):
-        "a line"
-        return slope * x + intercept
+	"a line"
+	return slope * x + intercept
 
 and build a composite model with just::
 
@@ -1005,24 +1006,24 @@ This model has parameters for both component models, and can be used as:
 which prints out the results::
 
     [[Model]]
-        (Model(gaussian) + Model(line))
+	(Model(gaussian) + Model(line))
     [[Fit Statistics]]
-        # function evals   = 44
-        # data points      = 101
-        # variables        = 5
-        chi-square         = 2.579
-        reduced chi-square = 0.027
-        Akaike info crit   = -355.329
-        Bayesian info crit = -342.253
+	# function evals   = 44
+	# data points      = 101
+	# variables        = 5
+	chi-square         = 2.579
+	reduced chi-square = 0.027
+	Akaike info crit   = -360.457
+	Bayesian info crit = -347.381
     [[Variables]]
-        amp:         8.45931061 +/- 0.124145 (1.47%) (init= 5)
-        cen:         5.65547872 +/- 0.009176 (0.16%) (init= 5)
-        intercept:  -0.96860201 +/- 0.033522 (3.46%) (init= 1)
-        slope:       0.26484403 +/- 0.005748 (2.17%) (init= 0)
-        wid:         0.67545523 +/- 0.009916 (1.47%) (init= 1)
+	amp:         8.45931061 +/- 0.124145 (1.47%) (init= 5)
+	cen:         5.65547872 +/- 0.009176 (0.16%) (init= 5)
+	intercept:  -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+	slope:       0.26484403 +/- 0.005748 (2.17%) (init= 0)
+	wid:         0.67545523 +/- 0.009916 (1.47%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(amp, wid)                  =  0.666
-        C(cen, intercept)            =  0.129
+	C(amp, wid)                  =  0.666
+	C(cen, intercept)            =  0.129
 
 
 and shows the plot on the left.
@@ -1099,13 +1100,13 @@ convolution function, perhaps as::
 
     import numpy as np
     def convolve(dat, kernel):
-        # simple convolution
-        npts = min(len(dat), len(kernel))
-        pad  = np.ones(npts)
-        tmp  = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
-        out  = np.convolve(tmp, kernel, mode='valid')
-        noff = int((len(out) - npts)/2)
-        return (out[noff:])[:npts]
+	# simple convolution
+	npts = min(len(dat), len(kernel))
+	pad  = np.ones(npts)
+	tmp  = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
+	out  = np.convolve(tmp, kernel, mode='valid')
+	noff = int((len(out) - npts)/2)
+	return (out[noff:])[:npts]
 
 which extends the data in both directions so that the convolving kernel
 function gives a valid result over the data range.  Because this function
@@ -1117,23 +1118,24 @@ binary operator.  A full script using this technique is here:
 which prints out the results::
 
     [[Model]]
-        (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
+	(Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
     [[Fit Statistics]]
-        # function evals   = 23
-        # data points      = 201
-        # variables        = 3
-        chi-square         = 25.789
-        reduced chi-square = 0.130
-        Akaike info crit   = -403.702
-        Bayesian info crit = -393.793
+	# function evals   = 27
+	# data points      = 201
+	# variables        = 3
+	chi-square         = 22.091
+	reduced chi-square = 0.112
+	Akaike info crit   = -437.837
+	Bayesian info crit = -427.927
     [[Variables]]
-        mid:         5 (fixed)
-        amplitude:   0.62249894 +/- 0.001946 (0.31%) (init= 1)
-        sigma:       0.61438887 +/- 0.014057 (2.29%) (init= 1.5)
-        center:      4.51710256 +/- 0.010152 (0.22%) (init= 3.5)
+	mid:         5 (fixed)
+	sigma:       0.64118585 +/- 0.013233 (2.06%) (init= 1.5)
+	center:      4.51633608 +/- 0.009567 (0.21%) (init= 3.5)
+	amplitude:   0.62654849 +/- 0.001813 (0.29%) (init= 1)
     [[Correlations]] (unreported correlations are <  0.100)
-        C(amplitude, center)         =  0.335
-        C(amplitude, sigma)          =  0.273
+	C(center, amplitude)         =  0.344
+	C(sigma, amplitude)          =  0.280
+
 
 and shows the plots:
 
diff --git a/doc/parameters.rst b/doc/parameters.rst
index e9b53d7..419ff61 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -1,5 +1,7 @@
 .. _parameters_chapter:
 
+.. module:: lmfit.parameter
+
 ================================================
 :class:`Parameter`  and :class:`Parameters`
 ================================================
@@ -94,42 +96,40 @@ The :class:`Parameter` class
     be set only if the provided value is not ``None``.  You can use this to
     update some Parameter attribute without affecting others, for example::
 
-        p1 = Parameter('a', value=2.0)
-        p2 = Parameter('b', value=0.0)
-        p1.set(min=0)
-        p2.set(vary=False)
+	p1 = Parameter('a', value=2.0)
+	p2 = Parameter('b', value=0.0)
+	p1.set(min=0)
+	p2.set(vary=False)
 
     to set a lower bound, or to fix a Parameter so that it will not vary.
 
     Note that to use this approach to lift a lower or upper bound, doing::
 
-        p1.set(min=0)
-        .....
-        # now lift the lower bound
-        p1.set(min=None)   # won't work!  lower bound NOT changed
+	p1.set(min=0)
+	.....
+	# now lift the lower bound
+	p1.set(min=None)   # won't work!  lower bound NOT changed
 
     won't work -- this will not change the current lower bound.  Instead
     you'll have to use ``np.inf`` to remove a lower or upper bound::
 
-        # now lift the lower bound
-        p1.set(min=-np.inf)   # will work!
+	# now lift the lower bound
+	p1.set(min=-np.inf)   # will work!
 
     Similarly, to clear an expression of a parameter, you need to pass an
     empty string, not ``None``.  You also need to give a value and
     explicitly tell it to vary::
 
-        p3 = Parameter('c', expr='(a+b)/2')
-        p3.set(expr=None)     # won't work!  expression NOT changed
+	p3 = Parameter('c', expr='(a+b)/2')
+	p3.set(expr=None)     # won't work!  expression NOT changed
 
-        # remove constraint expression
-        p3.set(value=1.0, vary=True, expr='')  # will work!  parameter now unconstrained
+	# remove constraint expression
+	p3.set(value=1.0, vary=True, expr='')  # will work!  parameter now unconstrained
 
 
 The :class:`Parameters` class
 ========================================
 
-.. currentmodule:: lmfit.parameter
-
 .. class:: Parameters()
 
    create a Parameters object.  This is little more than a fancy ordered
@@ -150,28 +150,28 @@ The :class:`Parameters` class
        object associated with the key `name`, with optional arguments
        passed to :class:`Parameter`::
 
-         p = Parameters()
-         p.add('myvar', value=1, vary=True)
+	 p = Parameters()
+	 p.add('myvar', value=1, vary=True)
 
     .. method:: add_many(self, paramlist)
 
        add a list of named parameters.  Each entry must be a tuple
        with the following entries::
 
-            name, value, vary, min, max, expr
+	    name, value, vary, min, max, expr
 
        This method is somewhat rigid and verbose (no default values), but can
        be useful when initially defining a parameter list so that it looks
        table-like::
 
-         p = Parameters()
-         #           (Name,  Value,  Vary,   Min,  Max,  Expr)
-         p.add_many(('amp1',    10,  True, None, None,  None),
-                    ('cen1',   1.2,  True,  0.5,  2.0,  None),
-                    ('wid1',   0.8,  True,  0.1, None,  None),
-                    ('amp2',   7.5,  True, None, None,  None),
-                    ('cen2',   1.9,  True,  1.0,  3.0,  None),
-                    ('wid2',  None, False, None, None, '2*wid1/3'))
+	 p = Parameters()
+	 #           (Name,  Value,  Vary,   Min,  Max,  Expr)
+	 p.add_many(('amp1',    10,  True, None, None,  None),
+		    ('cen1',   1.2,  True,  0.5,  2.0,  None),
+		    ('wid1',   0.8,  True,  0.1, None,  None),
+		    ('amp2',   7.5,  True, None, None,  None),
+		    ('cen2',   1.9,  True,  1.0,  3.0,  None),
+		    ('wid2',  None, False, None, None, '2*wid1/3'))
 
 
     .. automethod:: Parameters.pretty_print
@@ -228,10 +228,10 @@ can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
 which would make the objective function ``fcn2min`` above look like::
 
     def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        v = params.valuesdict()
+	""" model decaying sine wave, subtract data"""
+	v = params.valuesdict()
 
-        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
-        return model - data
+	model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+	return model - data
 
 The results are identical, and the difference is a stylistic choice.
diff --git a/doc/testlinks.py b/doc/testlinks.py
new file mode 100644
index 0000000..a35a685
--- /dev/null
+++ b/doc/testlinks.py
@@ -0,0 +1,9 @@
+from sphinx.ext.intersphinx import fetch_inventory
+import warnings
+uri = 'file:///Users/Newville/Codes/lmfit-py/doc/_build/html/'
+inv = fetch_inventory(warnings, uri, uri + 'objects.inv')
+print (" INV : ", inv)
+
+for key in inv.keys():
+    for key2 in inv[key]:
+        print(key2)
diff --git a/doc/testlinks.py.~1~ b/doc/testlinks.py.~1~
new file mode 100644
index 0000000..e69de29
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index 125f2c7..063c6c9 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -11,6 +11,45 @@ changes to the use and behavior of the library.  This is not meant to be a
 comprehensive list of changes.  For such a complete record, consult the
 `lmfit github repository`_.
 
+.. _whatsnew_095_label:
+
+Version 0.9.5 Release Notes
+==========================================
+
+Support for Python 2.6 and scipy 0.13 has been dropped.
+
+.. _whatsnew_094_label:
+
+Version 0.9.4 Release Notes
+==========================================
+
+Some support for the new `least_squares` routine from scipy 0.17 has been
+added.
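+
+For example (a sketch, with `residual` and `params` defined as in the
+fitting documentation)::
+
+    result = lmfit.minimize(residual, params, method='least_squares')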
+
+
+Parameters can now be used directly in floating point or array expressions,
+so that retrieving the value with `sigma = params['sigma'].value` is no
+longer needed.  The older, explicit usage still works, but the docs,
+samples, and tests have been updated to use the simpler usage.
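+
+For example (a minimal sketch of the simpler usage)::
+
+    from lmfit import Parameter
+    sigma = Parameter('sigma', value=2.0)
+    print(3 * sigma + 1.0)    # the Parameter acts as its value: prints 7.0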
+
+Support for Python 2.6 and scipy 0.13 is now explicitly deprecated and will
+be dropped in version 0.9.5.
+
+.. _whatsnew_093_label:
+
+Version 0.9.3 Release Notes
+==========================================
+
+Models involving complex numbers have been improved.
+
+The `emcee` module can now be used for uncertainty estimation.
+
+Many bug fixes, including an important fix for a performance slowdown when
+getting parameter values.
+
+ASV benchmarking code added.
+
+
 .. _whatsnew_090_label:
 
 Version 0.9.0 Release Notes
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index 30d0d6b..ccfd898 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -25,14 +25,16 @@ from scipy.optimize.  It has a number of useful enhancements, including:
 
   * Many pre-built models for common lineshapes are included and ready to use.
 
-   version: 0.8.0
-   last update: 2014-Sep-21
-   License: MIT
+   version: 0.9.5
+   last update: 2016-Jul-26
+   License: BSD
    Authors:  Matthew Newville, The University of Chicago
              Till Stensitzki, Freie Universitat Berlin
              Daniel B. Allen, Johns Hopkins University
              Antonino Ingargiola, University of California, Los Angeles
 """
+import warnings
+import sys
 
 from .minimizer import minimize, Minimizer, MinimizerException
 from .parameter import Parameter, Parameters
@@ -49,5 +51,17 @@ from .uncertainties import ufloat, correlated_values
 
 ## versioneer code
 from ._version import get_versions
+
 __version__ = get_versions()['version']
 del get_versions
+
+# PY26 Deprecation Warning
+if sys.version_info[:2] == (2, 6):
+    warnings.warn('Support for Python 2.6.x was dropped with lmfit 0.9.5')
+
+# SCIPY 0.13 Deprecation Warning
+import scipy
+scipy_major, scipy_minor, scipy_other = scipy.__version__.split('.', 2)
+
+if int(scipy_major) == 0 and int(scipy_minor) < 14:
+    warnings.warn('Support for scipy 0.13 was dropped with lmfit 0.9.5')
diff --git a/lmfit/_version.py b/lmfit/_version.py
index fa233a4..b5205c9 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -4,8 +4,8 @@
 # unpacked source archive. Distribution tarballs contain a pre-generated copy
 # of this file.
 
-version_version = '0.9.3'
-version_full = '8a4eb4f5675628bc8ee2d0da70b9e9397f80c280'
+version_version = '0.9.5'
+version_full = 'b8f4c6f4dac8e7ae395dbf2dd67b4212b34e304a'
 def get_versions(default={}, verbose=False):
     return {'version': version_version, 'full': version_full}
 
diff --git a/lmfit/asteval.py b/lmfit/asteval.py
index e8409aa..cb839e8 100644
--- a/lmfit/asteval.py
+++ b/lmfit/asteval.py
@@ -86,28 +86,28 @@ class Interpreter:
         symtable['print'] = self._printer
 
         # add python symbols
-        py_symtable = {sym: __builtins__[sym] for sym in FROM_PY
-                              if sym in __builtins__}
+        py_symtable = dict((sym, __builtins__[sym]) for sym in FROM_PY
+                              if sym in __builtins__)
         symtable.update(py_symtable)
 
         # add local symbols
-        local_symtable = {sym: obj for (sym, obj) in LOCALFUNCS.items()}
+        local_symtable = dict((sym, obj) for (sym, obj) in LOCALFUNCS.items())
         symtable.update(local_symtable)
 
         # add math symbols
-        math_symtable = {sym: getattr(math, sym) for sym in FROM_MATH
-                              if hasattr(math, sym)}
+        math_symtable = dict((sym, getattr(math, sym)) for sym in FROM_MATH
+                              if hasattr(math, sym))
         symtable.update(math_symtable)
 
         # add numpy symbols
         if self.use_numpy:
-            numpy_symtable = {sym: getattr(numpy, sym) for sym in FROM_NUMPY
-                              if hasattr(numpy, sym)}
+            numpy_symtable = dict((sym, getattr(numpy, sym)) for sym in FROM_NUMPY
+                              if hasattr(numpy, sym))
             symtable.update(numpy_symtable)
 
-            npy_rename_symtable = {name: getattr(numpy, sym) for name, sym
+            npy_rename_symtable = dict((name, getattr(numpy, sym)) for name, sym
                                    in NUMPY_RENAMES.items()
-                                   if hasattr(numpy, sym)}
+                                   if hasattr(numpy, sym))
             symtable.update(npy_rename_symtable)
 
         self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
diff --git a/lmfit/astutils.py b/lmfit/astutils.py
index 9df7146..4906c9c 100644
--- a/lmfit/astutils.py
+++ b/lmfit/astutils.py
@@ -100,9 +100,9 @@ FROM_NUMPY = ('Inf', 'NAN', 'abs', 'add', 'alen', 'all', 'amax', 'amin',
               'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
               'logical_not', 'logical_or', 'logical_xor', 'logspace',
               'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong',
-              'mafromtxt', 'mask_indices', 'mat', 'matrix', 'max',
+              'mafromtxt', 'mask_indices', 'mat', 'matrix',
               'maximum', 'maximum_sctype', 'may_share_memory', 'mean',
-              'median', 'memmap', 'meshgrid', 'mgrid', 'min', 'minimum',
+              'median', 'memmap', 'meshgrid', 'mgrid', 'minimum',
               'mintypecode', 'mirr', 'mod', 'modf', 'msort', 'multiply',
               'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax',
               'nanmin', 'nansum', 'ndarray', 'ndenumerate', 'ndfromtxt',
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 6c55279..573cb33 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -267,12 +267,12 @@ def powerlaw(x, amplitude=1, exponent=1.0):
     return amplitude * x**exponent
 
 
-def linear(x, slope, intercept):
+def linear(x, slope=1.0, intercept=0.0):
     "x -> slope * x + intercept"
     return slope * x + intercept
 
 
-def parabolic(x, a, b, c):
+def parabolic(x, a=0.0, b=0.0, c=0.0):
     "x -> a * x**2 + b * x + c"
     return a * x**2 + b * x + c
 
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index c67a396..46a582f 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -14,13 +14,24 @@ function-to-be-minimized (residual function) in terms of these Parameters.
 from copy import deepcopy
 import numpy as np
 from numpy import (dot, eye, ndarray, ones_like,
-                   sqrt, take, transpose, triu, deprecate)
+                   sqrt, take, transpose, triu)
 from numpy.dual import inv
 from numpy.linalg import LinAlgError
 import multiprocessing
 import numbers
+import warnings
 
+##
+##  scipy version notes:
+##  currently scipy 0.14 is required.
+##  feature           scipy version added
+##    minimize              0.11
+##    OptimizeResult        0.13
+##    diff_evolution        0.15
+##    least_squares         0.17
+##
+
 from scipy.optimize import leastsq as scipy_leastsq
+from scipy.optimize import minimize as scipy_minimize
 
 # differential_evolution is only present in scipy >= 0.15
 try:
@@ -28,6 +39,14 @@ try:
 except ImportError:
     from ._differentialevolution import differential_evolution as scipy_diffev
 
+# check for scipy.optimize.least_squares
+HAS_LEAST_SQUARES = False
+try:
+    from scipy.optimize import least_squares
+    HAS_LEAST_SQUARES = True
+except ImportError:
+    pass
+
 # check for EMCEE
 HAS_EMCEE = False
 try:
@@ -44,14 +63,6 @@ try:
 except ImportError:
     pass
 
-# check for scipy.optimize.minimize
-HAS_SCALAR_MIN = False
-try:
-    from scipy.optimize import minimize as scipy_minimize
-    HAS_SCALAR_MIN = True
-except ImportError:
-    pass
-
 from .parameter import Parameter, Parameters
 
 # use locally modified version of uncertainties package
@@ -138,24 +149,65 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
 
 
 class MinimizerResult(object):
-    """ The result of a minimization.
+    """
+    A class that holds the results of a minimization.
+
+    This is a plain container (with no methods of its own) that
+    simply holds the results of the minimization.  Fit results
+    include data such as status and error messages, fit statistics,
+    and the updated (i.e. best-fit) parameters themselves in :attr:`params`.
+
+    The list of (possible) `MinimizerResult` attributes follows.
 
     Attributes
     ----------
-    params : Parameters
-        The best-fit parameters
-    success : bool
-        Whether the minimization was successful
+    params : :class:`lmfit.parameter.Parameters`
+        The best-fit parameters resulting from the fit.
     status : int
         Termination status of the optimizer. Its value depends on the
         underlying solver. Refer to `message` for details.
+    var_names : list
+        Ordered list of variable parameter names used in optimization, and
+        useful for understanding the values in :attr:`init_vals` and
+        :attr:`covar`.
+    covar : numpy.ndarray
+        Covariance matrix from minimization (`leastsq` only), with
+        rows/columns using :attr:`var_names`.
+    init_vals : list
+        List of initial values for variable parameters using :attr:`var_names`.
+    init_values : dict
+        Dictionary of initial values for variable parameters.
+    nfev : int
+        Number of function evaluations.
+    success : bool
+        Boolean (``True``/``False``) for whether fit succeeded.
+    errorbars : bool
+        Boolean (``True``/``False``) for whether uncertainties were estimated.
+    message : string
+        Message about fit success.
+    ier : int
+        Integer error value from :scipydoc:`optimize.leastsq` (`leastsq` only).
+    lmdif_message : string
+        Message from :scipydoc:`optimize.leastsq` (`leastsq` only).
+    nvarys : int
+        Number of variables in fit: :math:`N_{\\rm varys}`.
+    ndata : int
+        Number of data points: :math:`N`.
+    nfree : int
+        Degrees of freedom in fit: :math:`N - N_{\\rm varys}`.
+    residual : numpy.ndarray
+        Residual array :math:`{\\rm Resid_i}`. Return value of the objective
+        function.
+    chisqr : float
+        Chi-square: :math:`\chi^2 = \sum_i^N [{\\rm Resid}_i]^2`
+    redchi : float
+        Reduced chi-square:
+        :math:`\chi^2_{\\nu}= {\chi^2} / {(N - N_{\\rm varys})}`
+    aic : float
+        Akaike Information Criterion statistic.
+    bic : float
+        Bayesian Information Criterion statistic.
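+
+    A sketch of typical use, after ``result = minimize(residual, params)``
+    has completed::
+
+        print(result.success, result.nfev, result.chisqr)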
 
-    Notes
-    -----
-    Additional attributes not listed above may be present, depending on the
-    specific solver. Since this class is essentially a subclass of dict
-    with attribute accessors, one can see which attributes are available
-    using the `keys()` method.
     """
     def __init__(self, **kws):
         for key, val in kws.items():
@@ -179,14 +231,15 @@ class MinimizerResult(object):
 
 class Minimizer(object):
     """A general minimizer for curve fitting"""
-    err_nonparam = ("params must be a minimizer.Parameters() instance or list "
-                    "of Parameters()")
-    err_maxfev = ("Too many function calls (max set to %i)!  Use:"
-                  " minimize(func, params, ..., maxfev=NNN)"
-                  "or set leastsq_kws['maxfev']  to increase this maximum.")
+    _err_nonparam = ("params must be a minimizer.Parameters() instance or list "
+                     "of Parameters()")
+    _err_maxfev = ("Too many function calls (max set to %i)!  Use:"
+                   " minimize(func, params, ..., maxfev=NNN)"
+                   "or set leastsq_kws['maxfev']  to increase this maximum.")
 
     def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
-                 iter_cb=None, scale_covar=True, **kws):
+                 iter_cb=None, scale_covar=True, nan_policy='raise',
+                 **kws):
         """
         Initialization of the Minimizer class
 
@@ -197,12 +250,12 @@ class Minimizer(object):
             model and data) to be minimized in a least squares sense.  The
             function must have the signature:
             `userfcn(params, *fcn_args, **fcn_kws)`
-        params : lmfit.parameter.Parameters object.
+        params : :class:`lmfit.parameter.Parameters` object.
             contains the Parameters for the model.
         fcn_args : tuple, optional
-            positional arguments to pass to userfcn.
+            positional arguments to pass to `userfcn`.
         fcn_kws : dict, optional
-            keyword arguments to pass to userfcn.
+            keyword arguments to pass to `userfcn`.
         iter_cb : callable, optional
             Function to be called at each fit iteration. This function should
             have the signature:
@@ -213,23 +266,34 @@ class Minimizer(object):
         scale_covar : bool, optional
             Whether to automatically scale the covariance matrix (leastsq
             only).
+        nan_policy : str, optional
+            Specifies action if `userfcn` (or a Jacobian) returns nan
+            values. One of:
+
+                'raise' - a `ValueError` is raised
+                'propagate' - the values returned from `userfcn` are un-altered
+                'omit' - the non-finite values are filtered.
+
         kws : dict, optional
             Options to pass to the minimizer being used.
 
         Notes
         -----
         The objective function should return the value to be minimized. For the
-        Levenberg-Marquardt algorithm from leastsq(), this returned value must
+        Levenberg-Marquardt algorithm from :meth:`leastsq` or
+        :meth:`least_squares`, this returned value must
         be an array, with a length greater than or equal to the number of
         fitting variables in the model. For the other methods, the return value
         can either be a scalar or an array. If an array is returned, the sum of
         squares of the array will be sent to the underlying fitting method,
-        effectively doing a least-squares optimization of the return values.
-
-        A common use for the fcn_args and fcn_kwds would be to pass in other
-        data needed to calculate the residual, including such things as the
-        data array, dependent variable, uncertainties in the data, and other
-        data structures for the model calculation.
+        effectively doing a least-squares optimization of the return values. If
+        the objective function returns non-finite values then a `ValueError`
+        will be raised because the underlying solvers cannot deal with them.
+
+        A common use for the `fcn_args` and `fcn_kws` would be to pass in
+        other data needed to calculate the residual, including such things
+        as the data array, dependent variable, uncertainties in the data,
+        and other data structures for the model calculation.
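+
+        A sketch of typical use (with `residual`, `params`, and the data
+        arrays defined as in the documentation examples)::
+
+            mini = Minimizer(residual, params, fcn_args=(x, data, eps_data),
+                             nan_policy='omit')
+            result = mini.minimize()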
         """
         self.userfcn = userfcn
         self.userargs = fcn_args
@@ -258,36 +322,54 @@ class Minimizer(object):
 
         self.params = params
         self.jacfcn = None
+        self.nan_policy = nan_policy
 
     @property
     def values(self):
+        """dict : Parameter values in a simple dictionary.
         """
-        Returns
-        -------
-        param_values : dict
-            Parameter values in a simple dictionary.
-        """
-
-        return dict([(name, p.value) for name, p in self.result.params.items()])
+        return {name: p.value for name, p in self.result.params.items()}
 
-    def __residual(self, fvars):
+    def __residual(self, fvars, apply_bounds_transformation=True):
         """
         Residual function used for least-squares fit.
         With the new, candidate values of fvars (the fitting variables), this
         evaluates all parameters, including setting bounds and evaluating
         constraints, and then passes those to the user-supplied function to
         calculate the residual.
+
+        Parameters
+        ----------------
+        fvars : np.ndarray
+            Array of new parameter values suggested by the minimizer.
+        apply_bounds_transformation : bool, optional
+            If true, apply lmfits parameter transformation to constrain
+            parameters. This is needed for solvers without inbuilt support for
+            bounds.
+
+        Returns
+        -----------
+        residuals : np.ndarray
+             The evaluated function values for given fvars.
         """
         # set parameter values
         if self._abort:
             return None
         params = self.result.params
-        for name, val in zip(self.result.var_names, fvars):
-            params[name].value = params[name].from_internal(val)
-        self.result.nfev += 1
 
+        if apply_bounds_transformation:
+            for name, val in zip(self.result.var_names, fvars):
+                params[name].value = params[name].from_internal(val)
+        else:
+            for name, val in zip(self.result.var_names, fvars):
+                params[name].value = val
         params.update_constraints()
+
+        self.result.nfev += 1
+
         out = self.userfcn(params, *self.userargs, **self.userkws)
+        out = _nan_policy(out, nan_policy=self.nan_policy)
+
         if callable(self.iter_cb):
             abort = self.iter_cb(params, self.result.nfev, out,
                                  *self.userargs, **self.userkws)
@@ -313,9 +395,12 @@ class Minimizer(object):
 
         self.result.nfev += 1
         pars.update_constraints()
+
         # compute the jacobian for "internal" unbounded variables,
-        # the rescale for bounded "external" variables.
+        # then rescale for bounded "external" variables.
         jac = self.jacfcn(pars, *self.userargs, **self.userkws)
+        jac = _nan_policy(jac, nan_policy=self.nan_policy)
+
         if self.col_deriv:
             jac = (jac.transpose()*grad_scale).transpose()
         else:
@@ -324,15 +409,16 @@ class Minimizer(object):
 
     def penalty(self, fvars):
         """
-        Penalty function for scalar minimizers:
+        Penalty function for scalar minimizers.
 
         Parameters
         ----------
-        fvars : array of values for the variable parameters
+        fvars : numpy.ndarray
+            Array of values for the variable parameters
 
         Returns
         -------
-        r - float
+        r : float
             The evaluated user-supplied objective function.  If the
             objective function returns an array, the sum of squares of
             the array is returned.
         """
@@ -343,12 +429,12 @@ class Minimizer(object):
 
     def prepare_fit(self, params=None):
         """
-        Prepares parameters for fitting,
-        return array of initial values
+        Prepare parameters for fitting and return an array of initial values.
         """
         # determine which parameters are actually variables
         # and which are defined expressions.
-        result = self.result = MinimizerResult()
+        self.result = MinimizerResult()
+        result = self.result
         if params is not None:
             self.params = params
         if isinstance(self.params, Parameters):
@@ -357,11 +443,11 @@ class Minimizer(object):
             result.params = Parameters()
             for par in self.params:
                 if not isinstance(par, Parameter):
-                    raise MinimizerException(self.err_nonparam)
+                    raise MinimizerException(self._err_nonparam)
                 else:
                     result.params[par.name] = par
         elif self.params is None:
-            raise MinimizerException(self.err_nonparam)
+            raise MinimizerException(self._err_nonparam)
 
         # determine which parameters are actually variables
         # and which are defined expressions.
@@ -385,49 +471,23 @@ class Minimizer(object):
             if par.name is None:
                 par.name = name
         result.nvarys = len(result.var_names)
+        result.init_values = {n: v for n, v in zip(result.var_names,
+                                                   result.init_vals)}
         return result
 
     def unprepare_fit(self):
         """
-        Unprepares the fit, so that subsequent fits will be
-        forced to run prepare_fit.
+        Clean the fit state, so that subsequent fits will need to call prepare_fit.
 
-        removes ast compilations of constraint expressions
+        Removes AST compilations of constraint expressions.
         """
         pass
 
-    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
-                       'and method=\'L-BFGS-B\' instead')
-    def lbfgsb(self, **kws):
-        """
-        Use l-bfgs-b minimization
-
-        Parameters
-        ----------
-        kws : dict
-            Minimizer options to pass to the
-            scipy.optimize.lbfgsb.fmin_l_bfgs_b function.
-
-        """
-        raise NotImplementedError("use scalar_minimize(method='L-BFGS-B')")
-
-    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
-                       'and method=\'Nelder-Mead\' instead')
-    def fmin(self, **kws):
+    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
         """
-        Use Nelder-Mead (simplex) minimization
+        Scalar minimization using `scipy.optimize.minimize`.
 
-        Parameters
-        ----------
-        kws : dict
-            Minimizer options to pass to the scipy.optimize.fmin minimizer.
-        """
-        raise NotImplementedError("use scalar_minimize(method='Nelder-Mead')")
 
-    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
-        """
-        Use one of the scalar minimization methods from
-        scipy.optimize.minimize.
 
         Parameters
         ----------
@@ -449,7 +509,7 @@ class Minimizer(object):
         params : Parameters, optional
            Parameters to use as starting points.
         kws : dict, optional
-            Minimizer options pass to scipy.optimize.minimize.
+            Minimizer options to pass to `scipy.optimize.minimize`.
 
         If the objective function returns a numpy array instead
         of the expected scalar, the sum of squares of the array
@@ -463,14 +523,17 @@ class Minimizer(object):
 
         Returns
         -------
-        success : bool
-            Whether the fit was successful.
+        :class:`MinimizerResult`
+            Object containing the optimized parameters
+            and several goodness-of-fit statistics.
+
 
+        .. versionchanged:: 0.9.0
+           return value changed to :class:`MinimizerResult`
         """
-        if not HAS_SCALAR_MIN:
-            raise NotImplementedError
 
         result = self.prepare_fit(params=params)
+        result.method = method
         vars = result.init_vals
         params = result.params
 
@@ -496,10 +559,13 @@ class Minimizer(object):
 
         if method == 'differential_evolution':
             fmin_kws['method'] = _differential_evolution
-            bounds = [(par.min, par.max) for par in params.values()]
-            if not np.all(np.isfinite(bounds)):
+            bounds = np.asarray([(par.min, par.max)
+                                 for par in params.values()])
+            varying = np.asarray([par.vary for par in params.values()])
+
+            if not np.all(np.isfinite(bounds[varying])):
                 raise ValueError('With differential evolution finite bounds '
-                                 'are required for each parameter')
+                                 'are required for each varying parameter')
             bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
             fmin_kws['bounds'] = bounds
 
@@ -530,9 +596,10 @@ class Minimizer(object):
             result.ndata = len(result.residual)
             result.nfree = result.ndata - result.nvarys
         result.redchi = result.chisqr / result.nfree
-        _log_likelihood = result.ndata * np.log(result.redchi)
-        result.aic = _log_likelihood + 2 * result.nvarys
-        result.bic = _log_likelihood + np.log(result.ndata) * result.nvarys
+        # this is -2*loglikelihood
+        _neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata)
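+        # note: chisqr/ndata here, not redchi = chisqr/(ndata - nvarys)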
+        result.aic = _neg2_log_likel + 2 * result.nvarys
+        result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
 
         return result
 
@@ -540,6 +607,8 @@ class Minimizer(object):
               ntemps=1, pos=None, reuse_sampler=False, workers=1,
               float_behavior='posterior', is_weighted=True, seed=None):
         """
+        Bayesian sampling of the posterior distribution using `emcee`.
+
         Bayesian sampling of the posterior distribution for the parameters
         using the `emcee` Markov Chain Monte Carlo package. The method assumes
         that the prior is Uniform. You need to have `emcee` installed to use
@@ -631,7 +700,7 @@ class Minimizer(object):
 
         Returns
         -------
-        result : MinimizerResult
+        :class:`MinimizerResult`
             MinimizerResult object containing updated params, statistics,
             etc. The `MinimizerResult` also contains the ``chain``,
             ``flatchain`` and ``lnprob`` attributes. The ``chain``
@@ -727,6 +796,7 @@ class Minimizer(object):
             tparams = None
 
         result = self.prepare_fit(params=tparams)
+        result.method = 'emcee'
         params = result.params
 
         # check if the userfcn returns a vector of residuals
@@ -784,7 +854,8 @@ class Minimizer(object):
         lnprob_kwargs = {'is_weighted': is_weighted,
                          'float_behavior': float_behavior,
                          'userargs': self.userargs,
-                         'userkws': self.userkws}
+                         'userkws': self.userkws,
+                         'nan_policy': self.nan_policy}
 
         if ntemps > 1:
             # the prior and likelihood function args and kwargs are the same
@@ -884,10 +955,75 @@ class Minimizer(object):
 
         return result
 
+    def least_squares(self, params=None, **kws):
+        """
+        Use the `least_squares` function (new in scipy 0.17) to perform a fit.
+
+        This method assumes that Parameters have been stored, and a function
+        to minimize has been properly set up.  It wraps
+        scipy.optimize.least_squares, which has inbuilt support for bounds
+        and robust loss functions.
+
+        Parameters
+        ----------
+        params : Parameters, optional
+           Parameters to use as starting points.
+        kws : dict, optional
+            Minimizer options to pass to scipy.optimize.least_squares.
+
+        Returns
+        -------
+        :class:`MinimizerResult`
+            Object containing the optimized parameters
+            and several goodness-of-fit statistics.
+
+
+        .. versionchanged:: 0.9.0
+           return value changed to :class:`MinimizerResult`
+        """
+
+        if not HAS_LEAST_SQUARES:
+            raise NotImplementedError("Scipy with a version higher than 0.17 "
+                                      "is needed for this method.")
+
+        result = self.prepare_fit(params)
+        result.method = 'least_squares'
+
+        replace_none = lambda x, sign: sign*np.inf if x is None else x
+        upper_bounds = [replace_none(i.max, 1) for i in self.params.values()]
+        lower_bounds = [replace_none(i.min, -1) for i in self.params.values()]
+        start_vals = [i.value for i in self.params.values()]
+
+        ret = least_squares(self.__residual,
+                            start_vals,
+                            bounds=(lower_bounds, upper_bounds),
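+                            # scipy's least_squares handles bounds natively,
+                            # so lmfit's internal bounds transformation is
+                            # turned off for __residual here: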
+                            kwargs=dict(apply_bounds_transformation=False),
+                            **kws
+                            )
+
+        for attr in ret:
+            setattr(result, attr, ret[attr])
+
+        result.x = np.atleast_1d(result.x)
+        result.chisqr = result.residual = self.__residual(result.x, False)
+        result.nvarys = len(start_vals)
+        result.ndata = 1
+        result.nfree = 1
+        if isinstance(result.residual, ndarray):
+            result.chisqr = (result.chisqr**2).sum()
+            result.ndata = len(result.residual)
+            result.nfree = result.ndata - result.nvarys
+        result.redchi = result.chisqr / result.nfree
+        # this is -2*loglikelihood
+        _neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata)
+        result.aic = _neg2_log_likel + 2 * result.nvarys
+        result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
+        return result
+
     def leastsq(self, params=None, **kws):
         """
         Use Levenberg-Marquardt minimization to perform a fit.
-        This assumes that ModelParameters have been stored, and a function to
+        This assumes that Parameters have been stored, and a function to
         minimize has been properly set up.
 
         This wraps scipy.optimize.leastsq.
@@ -906,10 +1042,15 @@ class Minimizer(object):
 
         Returns
         -------
-        success : bool
-            True if fit was successful, False if not.
+        :class:`MinimizerResult`
+            Object containing the optimized parameters
+            and several goodness-of-fit statistics.
+
+        .. versionchanged:: 0.9.0
+           return value changed to :class:`MinimizerResult`
         """
         result = self.prepare_fit(params=params)
+        result.method = 'leastsq'
         vars = result.init_vals
         nvars = len(vars)
         lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
@@ -944,7 +1085,7 @@ class Minimizer(object):
         elif ier == 0:
             result.message = 'Invalid Input Parameters.'
         elif ier == 5:
-            result.message = self.err_maxfev % lskws['maxfev']
+            result.message = self._err_maxfev % lskws['maxfev']
         else:
             result.message = 'Tolerance seems to be too small.'
 
@@ -953,9 +1094,11 @@ class Minimizer(object):
         result.chisqr = (resid**2).sum()
         result.nfree = (result.ndata - nvars)
         result.redchi = result.chisqr / result.nfree
-        _log_likelihood = result.ndata * np.log(result.redchi)
-        result.aic = _log_likelihood + 2 * nvars
-        result.bic = _log_likelihood + np.log(result.ndata) * nvars
+        result.nvarys = nvars
+        # this is -2*loglikelihood
+        _neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata)
+        result.aic = _neg2_log_likel + 2 * result.nvarys
+        result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
 
         params = result.params
 
@@ -1042,30 +1185,43 @@ class Minimizer(object):
         Parameters
         ----------
         method : str, optional
-            Name of the fitting method to use.
-            One of:
-            'leastsq'                -    Levenberg-Marquardt (default)
-            'nelder'                 -    Nelder-Mead
-            'lbfgsb'                 -    L-BFGS-B
-            'powell'                 -    Powell
-            'cg'                     -    Conjugate-Gradient
-            'newton'                 -    Newton-CG
-            'cobyla'                 -    Cobyla
-            'tnc'                    -    Truncate Newton
-            'trust-ncg'              -    Trust Newton-CGn
-            'dogleg'                 -    Dogleg
-            'slsqp'                  -    Sequential Linear Squares Programming
-            'differential_evolution' -    differential evolution
-
-        params : Parameters, optional
-            parameters to use as starting values
+            Name of the fitting method to use. Valid values are:
+
+            - `'leastsq'`: Levenberg-Marquardt (default).
+              Uses `scipy.optimize.leastsq`.
+            - `'least_squares'`: Levenberg-Marquardt.
+              Uses `scipy.optimize.least_squares`.
+            - `'nelder'`: Nelder-Mead
+            - `'lbfgsb'`: L-BFGS-B
+            - `'powell'`: Powell
+            - `'cg'`: Conjugate-Gradient
+            - `'newton'`: Newton-CG
+            - `'cobyla'`: Cobyla
+            - `'tnc'`: Truncated Newton
+            - `'trust-ncg'`: Trust-region Newton-CG
+            - `'dogleg'`: Dogleg
+            - `'slsqp'`: Sequential Least Squares Programming
+            - `'differential_evolution'`: differential evolution
+
+            For more details on the fitting methods please refer to the
+            `scipy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
+
+        params : :class:`lmfit.parameter.Parameters` object.
+            Parameters of the model to use as starting values.
+
+        **kwargs
+            Additional arguments are passed to the underlying minimization
+            method.
 
         Returns
         -------
-        result : MinimizerResult
+        :class:`MinimizerResult`
+            Object containing the optimized parameters
+            and several goodness-of-fit statistics.
 
-            MinimizerResult object contains updated params, fit statistics, etc.
 
+        .. versionchanged:: 0.9.0
+           return value changed to :class:`MinimizerResult`
         """
 
         function = self.leastsq
@@ -1073,19 +1229,16 @@ class Minimizer(object):
         kwargs.update(kws)
 
         user_method = method.lower()
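+        # note: 'leastsq' matches the 'leasts' prefix, while
+        # 'least_squares' falls through to the 'least_s' branch below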
-        if user_method.startswith('least'):
+        if user_method.startswith('leasts'):
             function = self.leastsq
-        elif HAS_SCALAR_MIN:
+        elif user_method.startswith('least_s'):
+            function = self.least_squares
+        else:
             function = self.scalar_minimize
             for key, val in SCALAR_METHODS.items():
                 if (key.lower().startswith(user_method) or
                     val.lower().startswith(user_method)):
                     kwargs['method'] = val
-        elif (user_method.startswith('nelder') or
-              user_method.startswith('fmin')):
-            function = self.fmin
-        elif user_method.startswith('lbfgsb'):
-            function = self.lbfgsb
         return function(**kwargs)
 
 
@@ -1114,7 +1267,8 @@ def _lnprior(theta, bounds):
 
 
 def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
-            userkws=None, float_behavior='posterior', is_weighted=True):
+            userkws=None, float_behavior='posterior', is_weighted=True,
+            nan_policy='raise'):
     """
     Calculates the log-posterior probability. See the `Minimizer.emcee` method
     for more details
@@ -1145,6 +1299,14 @@ def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
     is_weighted : bool
         If `userfcn` returns a vector of residuals then `is_weighted`
         specifies if the residuals have been weighted by data uncertainties.
+    nan_policy : str, optional
+        Specifies action if `userfcn` returns nan
+        values. One of:
+
+            'raise' - a `ValueError` is raised
+            'propagate' - the values returned from `userfcn` are un-altered
+            'omit' - the non-finite values are filtered.
+
 
     Returns
     -------
@@ -1170,6 +1332,8 @@ def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
 
     # now calculate the log-likelihood
     out = userfcn(params, *userargs, **userkwargs)
+    out = _nan_policy(out, nan_policy=nan_policy, handle_inf=False)
+
     lnprob = np.asarray(out).ravel()
 
     if lnprob.size > 1:
@@ -1213,43 +1377,117 @@ def _make_random_gen(seed):
                      ' instance' % seed)
 
 
+def _nan_policy(a, nan_policy='raise', handle_inf=True):
+    """
+    Specifies behaviour when an array contains np.nan or np.inf.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array to consider
+    nan_policy : str, optional
+        One of:
+
+        'raise' - raise a `ValueError` if `a` contains NaN.
+        'propagate' - propagate NaN.
+        'omit' - filter NaN from the input array.
+    handle_inf : bool, optional
+        If True, treat +/- inf the same way as NaN.
+
+    Returns
+    -------
+    filtered_array : array_like
+
+    Notes
+    -----
+    This function is copied, then modified, from
+    scipy/stats/stats.py/_contains_nan
+    """
+
+    policies = ['propagate', 'raise', 'omit']
+
+    if handle_inf:
+        handler_func = lambda a: ~np.isfinite(a)
+    else:
+        handler_func = np.isnan
+
+    if nan_policy == 'propagate':
+        # nan values are ignored.
+        return a
+    elif nan_policy == 'raise':
+        try:
+            # Calling np.sum to avoid creating a huge array into memory
+            # e.g. np.isnan(a).any()
+            with np.errstate(invalid='ignore'):
+                contains_nan = handler_func(np.sum(a))
+        except TypeError:
+            # If the check cannot be properly performed we fall back to
+            # omitting nan values and raising a warning. This can happen when
+            # attempting to sum things that are not numbers (e.g. as in the
+            # function `mode`).
+            contains_nan = False
+            warnings.warn("The input array could not be properly checked for nan "
+                          "values. nan values will be ignored.", RuntimeWarning)
+
+        if contains_nan:
+            raise ValueError("The input contains nan values")
+        return a
+
+    elif nan_policy == 'omit':
+        # nans are filtered
+        mask = handler_func(a)
+        return a[~mask]
+    else:
+        raise ValueError("nan_policy must be one of {%s}" %
+                         ', '.join("'%s'" % s for s in policies))
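+
+# For example, _nan_policy(np.array([0.0, np.nan, 1.0]), nan_policy='omit')
+# would return array([ 0.,  1.])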
+
+
 def minimize(fcn, params, method='leastsq', args=None, kws=None,
              scale_covar=True, iter_cb=None, **fit_kws):
     """
-    A general purpose curvefitting function
-    The minimize function takes a objective function to be minimized, a
-    dictionary (lmfit.parameter.Parameters) containing the model parameters,
-    and several optional arguments.
+    This function performs a fit of a set of parameters by minimizing
+    an objective (or "cost") function using one one of the several
+    available methods. The minimize function takes a objective function
+    to be minimized, a dictionary (:class:`lmfit.parameter.Parameters`)
+    containing the model parameters, and several optional arguments.
 
     Parameters
     ----------
     fcn : callable
-        objective function that returns the residual (difference between
-        model and data) to be minimized in a least squares sense.  The
-        function must have the signature:
+        objective function to be minimized. When method is `leastsq` or
+        `least_squares`, the objective function should return an array
+        of residuals (difference between model and data) to be minimized
+        in a least squares sense. With the scalar methods the objective
+        function can either return the residuals array or a single scalar
+        value. The function must have the signature:
         `fcn(params, *args, **kws)`
-    params : lmfit.parameter.Parameters object.
+    params : :class:`lmfit.parameter.Parameters` object.
         contains the Parameters for the model.
     method : str, optional
-        Name of the fitting method to use.
-        One of:
-            'leastsq'                -    Levenberg-Marquardt (default)
-            'nelder'                 -    Nelder-Mead
-            'lbfgsb'                 -    L-BFGS-B
-            'powell'                 -    Powell
-            'cg'                     -    Conjugate-Gradient
-            'newton'                 -    Newton-CG
-            'cobyla'                 -    Cobyla
-            'tnc'                    -    Truncate Newton
-            'trust-ncg'              -    Trust Newton-CGn
-            'dogleg'                 -    Dogleg
-            'slsqp'                  -    Sequential Linear Squares Programming
-            'differential_evolution' -    differential evolution
+        Name of the fitting method to use. Valid values are:
+
+        - `'leastsq'`: Levenberg-Marquardt (default).
+          Uses `scipy.optimize.leastsq`.
+        - `'least_squares'`: Levenberg-Marquardt.
+          Uses `scipy.optimize.least_squares`.
+        - `'nelder'`: Nelder-Mead
+        - `'lbfgsb'`: L-BFGS-B
+        - `'powell'`: Powell
+        - `'cg'`: Conjugate-Gradient
+        - `'newton'`: Newton-CG
+        - `'cobyla'`: Cobyla
+        - `'tnc'`: Truncated Newton
+        - `'trust-ncg'`: Trust-region Newton-CG
+        - `'dogleg'`: Dogleg
+        - `'slsqp'`: Sequential Least Squares Programming
+        - `'differential_evolution'`: Differential Evolution
+
+        For more details on the fitting methods please refer to the
+        `scipy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
 
     args : tuple, optional
-        Positional arguments to pass to fcn.
+        Positional arguments to pass to `fcn`.
     kws : dict, optional
-        keyword arguments to pass to fcn.
+        Keyword arguments to pass to `fcn`.
     iter_cb : callable, optional
         Function to be called at each fit iteration. This function should
         have the signature `iter_cb(params, iter, resid, *args, **kws)`,
@@ -1262,6 +1500,16 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
     fit_kws : dict, optional
         Options to pass to the minimizer being used.
 
+    Returns
+    -------
+    :class:`MinimizerResult`
+        Object containing the optimized parameters
+        and several goodness-of-fit statistics.
+
+
+    .. versionchanged:: 0.9.0
+        return value changed to :class:`MinimizerResult`.
+
     Notes
     -----
     The objective function should return the value to be minimized. For the
@@ -1276,6 +1524,19 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
     data needed to calculate the residual, including such things as the
     data array, dependent variable, uncertainties in the data, and other
     data structures for the model calculation.
+
+    On output, the params will be unchanged.  The best-fit values, and where
+    appropriate, estimated uncertainties and correlations, will all be
+    contained in the returned :class:`MinimizerResult`.  See
+    :ref:`fit-results-label` for further details.
+
+    This function is simply a wrapper around :class:`Minimizer`
+    and is equivalent to::
+
+        fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+                           iter_cb=iter_cb, scale_covar=scale_covar,
+                           **fit_kws)
+        fitter.minimize(method=method)
+
     """
     fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
                        iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
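
To illustrate the `_nan_policy` helper added above, here is a minimal
sketch of its behavior (the defaults, `nan_policy='raise'` and
`handle_inf=True`, are inferred from the tests later in this commit)::

    import numpy as np
    from lmfit.minimizer import _nan_policy

    a = np.array([1.0, 2.0, np.nan])

    _nan_policy(a, nan_policy='propagate')   # returned unchanged, NaN included
    _nan_policy(a, nan_policy='omit')        # array([ 1.,  2.])
    # _nan_policy(a, nan_policy='raise')     # raises ValueError
    # With handle_inf=True (the default), +/-inf is treated like NaN;
    # handle_inf=False leaves inf values untouched.
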
diff --git a/lmfit/model.py b/lmfit/model.py
index 5dd1643..5a65d7f 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -277,6 +277,10 @@ class Model(object):
             # apply defaults from model function definition
             if basename in self.def_vals:
                 par.value = self.def_vals[basename]
+            if par.value in (None, -np.inf, np.inf, np.nan):
+                for key, val in self.def_vals.items():
+                    if key in name.lower():
+                        par.value = val
             # apply defaults from parameter hints
             if basename in self.param_hints:
                 hint = self.param_hints[basename]
@@ -405,12 +409,7 @@ class Model(object):
 
     def eval(self, params=None, **kwargs):
         """evaluate the model with the supplied parameters"""
-        result = self.func(**self.make_funcargs(params, kwargs))
-        # Handle special case of constant result and one
-        # independent variable (of any dimension).
-        if np.ndim(result) == 0 and len(self.independent_vars) == 1:
-            result = np.tile(result, kwargs[self.independent_vars[0]].shape)
-        return result
+        return self.func(**self.make_funcargs(params, kwargs))
 
     @property
     def components(self):
@@ -690,20 +689,20 @@ class ModelResult(Minimizer):
          the previous fit.  This allows easily changing data or
          parameter settings, or both.
 
-    eval(**kwargs)
-         evaluate the current model, with the current parameter values,
-         with values in kwargs sent to the model function.
+    eval(params=None, **kwargs)
+         evaluate the current model, with parameters (defaults to the current
+         parameter values), with values in kwargs sent to the model function.
 
-    eval_components(**kwargs)
-         evaluate the current model, with the current parameter values,
-         with values in kwargs sent to the model function and returns
-         a ordered dict with the model names as the key and the component
-         results as the values.
+    eval_components(params=None, **kwargs)
+         evaluate the current model, with parameters (defaults to the current
+         parameter values), with values in kwargs sent to the model function
+         and returns an ordered dict with the model names as the key and the
+         component results as the values.
 
    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
          return a fit report.
 
-   plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+   plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+            xlabel=None, ylabel=None,
             numpoints=None,  data_kws=None, fit_kws=None, init_kws=None,
             ax_kws=None)
         Plot the fit results using matplotlib.
@@ -712,7 +711,7 @@ class ModelResult(Minimizer):
                   ax_kws=None)
         Plot the fit residuals using matplotlib.
 
-   plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
+   plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None,
+        ylabel=None, numpoints=None,
         data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
         ax_fit_kws=None, fig_kws=None)
         Plot the fit results and residuals using matplotlib.
@@ -758,13 +757,36 @@ class ModelResult(Minimizer):
         self.best_values = self.model._make_all_args(_ret.params)
         self.best_fit    = self.model.eval(params=_ret.params, **self.userkws)
 
-    def eval(self, **kwargs):
+    def eval(self, params=None, **kwargs):
+        """
+        evaluate the model function
+
+        Arguments:
+            params (Parameters):  parameters, defaulting to ModelResult.params
+            kwargs (variable):  values of options, independent variables, etc.
+
+        Returns:
+            ndarray or float of the evaluated model
+        """
         self.userkws.update(kwargs)
-        return self.model.eval(params=self.params, **self.userkws)
+        if params is None:
+            params = self.params
+        return self.model.eval(params=params, **self.userkws)
 
-    def eval_components(self, **kwargs):
+    def eval_components(self, params=None, **kwargs):
+        """
+        evaluate each component of a composite model function
+
+        Arguments:
+            params (Parameters):  parameters, defaulting to ModelResult.params
+            kwargs (variable):  values of options, independent variables, etc.
+
+        Returns:
+            ordered dictionary keyed by component prefix, with the evaluated
+            result for each component of the model as values.
+        """
         self.userkws.update(kwargs)
-        return self.model.eval_components(params=self.params, **self.userkws)
+        if params is None:
+            params = self.params
+        return self.model.eval_components(params=params, **self.userkws)
 
     def conf_interval(self, **kwargs):
         """return explicitly calculated confidence intervals"""
@@ -783,7 +805,7 @@ class ModelResult(Minimizer):
                                             fit_report(self, **kwargs))
 
     @_ensureMatplotlib
-    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
+    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+                 xlabel=None, ylabel=None, yerr=None,
                  numpoints=None,  data_kws=None, fit_kws=None, init_kws=None,
                  ax_kws=None):
         """Plot the fit results using matplotlib.
@@ -803,6 +825,10 @@ class ModelResult(Minimizer):
             matplotlib format string for fitted curve
         initfmt : string, optional
             matplotlib format string for initial conditions for the fit
+        xlabel : string, optional
+            text to use for labeling the x-axis
+        ylabel : string, optional
+            text to use for labeling the y-axis
         yerr : ndarray, optional
             array of uncertainties for data array
         numpoints : int, optional
@@ -882,8 +908,12 @@ class ModelResult(Minimizer):
             ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
 
         ax.set_title(self.model.name)
-        ax.set_xlabel(independent_var)
-        ax.set_ylabel('y')
+        if xlabel is None:
+            ax.set_xlabel(independent_var)
+        else:
+            ax.set_xlabel(xlabel)
+        if ylabel is None:
+            ax.set_ylabel('y')
+        else:
+            ax.set_ylabel(ylabel)
         ax.legend()
 
         return ax
@@ -972,7 +1002,7 @@ class ModelResult(Minimizer):
         return ax
 
     @_ensureMatplotlib
-    def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
+    def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None,
+             ylabel=None, yerr=None,
              numpoints=None, fig=None, data_kws=None, fit_kws=None,
              init_kws=None, ax_res_kws=None, ax_fit_kws=None,
              fig_kws=None):
@@ -990,6 +1020,10 @@ class ModelResult(Minimizer):
             matplotlib format string for fitted curve
         initfmt : string, optional
             matplotlib format string for initial conditions for the fit
+        xlabel : string, optional
+            text to use for labeling the x-axis
+        ylabel : string, optional
+            text to use for labeling the y-axis
         yerr : ndarray, optional
             array of uncertainties for data array
         numpoints : int, optional
@@ -1058,7 +1092,7 @@ class ModelResult(Minimizer):
         ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
 
         self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
-                      initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
+                      initfmt=initfmt, xlabel=xlabel, ylabel=ylabel,
+                      numpoints=numpoints, data_kws=data_kws,
                       fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
         self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
                             data_kws=data_kws, fit_kws=fit_kws,
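
A short sketch of the new `ModelResult` behavior in this part of the
diff: `eval()`/`eval_components()` accepting an explicit `params`, and
the `xlabel`/`ylabel` plot arguments (the model and noise here are
illustrative, not from the commit; plotting needs matplotlib)::

    import copy
    import numpy as np
    from lmfit.models import GaussianModel

    x = np.linspace(-5, 5, 201)
    y = np.exp(-x**2 / 2.0) + np.random.normal(scale=0.02, size=x.size)

    model = GaussianModel()
    result = model.fit(y, model.guess(y, x=x), x=x)

    # evaluate with a different Parameters set (new `params` argument)
    wider = copy.deepcopy(result.params)
    wider['sigma'].value *= 2
    y_wide = result.eval(params=wider, x=x)

    # per-component evaluation returns an ordered dict keyed by prefix
    comps = result.eval_components(x=x)

    # custom axis labels (new xlabel/ylabel arguments)
    result.plot_fit(xlabel='energy (eV)', ylabel='counts')
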
diff --git a/lmfit/models.py b/lmfit/models.py
index 78d024d..d508fda 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -35,7 +35,7 @@ def fwhm_expr(model):
 
 def height_expr(model):
     "return constraint expression for maximum peak height"
-    fmt = "{factor:.7f}*{prefix:s}amplitude/{prefix:s}sigma"
+    fmt = "{factor:.7f}*{prefix:s}amplitude/max(1.e-15, {prefix:s}sigma)"
     return fmt.format(factor=model.height_factor, prefix=model.prefix)
 
 def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
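
The `height_expr` change above guards the generated constraint against
a zero-width peak. Assuming, as in recent lmfit versions, that
`GaussianModel` registers a `height` parameter hint built from
`height_expr` (its height factor is 1/sqrt(2*pi)), the expression now
comes out as, for example::

    from lmfit.models import GaussianModel

    model = GaussianModel(prefix='g1_')
    params = model.make_params(amplitude=5.0, center=0.0, sigma=1.0)
    print(params['g1_height'].expr)
    # -> 0.3989423*g1_amplitude/max(1.e-15, g1_sigma)
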
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 0581148..9750036 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -2,7 +2,6 @@
 Parameter class
 """
 from __future__ import division
-from numpy import arcsin, cos, sin, sqrt, inf, nan, isfinite
 import json
 from copy import deepcopy
 try:
@@ -10,8 +9,8 @@ try:
 except ImportError:
     from ordereddict import OrderedDict
 
+from numpy import array, arcsin, cos, sin, sqrt, inf, nan, isfinite
 from . import uncertainties
-
 from .asteval import Interpreter
 from .astutils import get_ast_names, valid_symbol_name
 
@@ -98,7 +97,7 @@ class Parameters(OrderedDict):
         # find the symbols that were added by users, not during construction
         sym_unique = self._asteval.user_defined_symbols()
         unique_symbols = {key: deepcopy(self._asteval.symtable[key], memo)
-                          for key in sym_unique}
+                              for key in sym_unique}
         _pars._asteval.symtable.update(unique_symbols)
 
         # we're just about to add a lot of Parameter objects to the newly
@@ -152,6 +151,12 @@ class Parameters(OrderedDict):
         self.add_many(*params)
         return self
 
+    def __array__(self):
+        """
+        return the parameter values as a numpy array
+        """
+        return array([float(k) for k in self.values()])
+
     def __reduce__(self):
         """
         Required to pickle a Parameters instance.
@@ -191,14 +196,13 @@ class Parameters(OrderedDict):
         # then add all the parameters
         self.add_many(*state['params'])
 
-
     def update_constraints(self):
         """
         Update all constrained parameters, checking that dependencies are
         evaluated as needed.
         """
-        requires_update = {name for name, par in self.items()
-                           if par._expr is not None}
+        requires_update = set(name for name, par in self.items()
+                              if par._expr is not None)
         updated_tracker = set(requires_update)
 
         def _update_param(name):
@@ -242,11 +246,11 @@ class Parameters(OrderedDict):
         columns : list of strings
             list of columns names to print. All values must be valid
             :class:`Parameter` attributes.
-        precision : int
-            number of digits to be printed after floating point.
-        format : string
+        fmt : string
             single-char numeric formatter. Valid values: 'f' floating point,
             'g' floating point and exponential, 'e' exponential.
+        precision : int
+            number of digits to be printed after floating point.
         """
         if oneline:
             print(self.pretty_repr(oneline=oneline))
@@ -617,6 +621,7 @@ class Parameter(object):
         # becomes stale if parameter.expr is not None.
         if (isinstance(self._val, uncertainties.Variable)
             and self._val is not nan):
+
             try:
                 self.value = self._val.nominal_value
             except AttributeError:
@@ -634,8 +639,10 @@ class Parameter(object):
                     check_ast_errors(self._expr_eval)
 
         v = self._val
-        if v > self.max: v = self.max
-        if v < self.min: v = self.min
+        if v > self.max:
+            v = self.max
+        if v < self.min:
+            v = self.min
         self.value = self._val = v
         return self._val
 
@@ -689,6 +696,10 @@ class Parameter(object):
             check_ast_errors(self._expr_eval)
             self._expr_deps = get_ast_names(self._expr_ast)
 
+    def __array__(self):
+        """return the parameter value as a numpy array"""
+        return array(float(self._getval()))
+
     def __str__(self):
         """string"""
         return self.__repr__()
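
The two `__array__` hooks added above let numpy coerce `Parameters`
and `Parameter` objects directly, which is what the test updates later
in this diff rely on. A minimal sketch (parameter names illustrative)::

    import numpy as np
    from lmfit import Parameters

    pars = Parameters()
    pars.add('amp', value=3.0)
    pars.add('decay', value=0.5)

    np.array(pars)            # array([ 3. ,  0.5]) via Parameters.__array__
    np.asarray(pars['amp'])   # 0-d array(3.0) via Parameter.__array__

    # so residual functions can use parameters like plain floats:
    x = np.linspace(0, 1, 5)
    model = pars['amp'] * np.exp(-pars['decay'] * x)
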
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index 4b279b0..11ae463 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -43,9 +43,9 @@ def gformat(val, length=11):
     and have at least length-6 significant digits
     """
     length = max(length, 7)
-    fmt = '{: .%ig}' % (length-6)
+    fmt = '{0: .%ig}' % (length-6)
     if isinstance(val, int):
-        out = ('{: .%ig}' % (length-2)).format(val)
+        out = ('{0: .%ig}' % (length-2)).format(val)
         if len(out) > length:
             out = fmt.format(val)
     else:
@@ -57,7 +57,7 @@ def gformat(val, length=11):
                 out = out[:ie] + '.' + out[ie:]
             out = out.replace('e', '0'*(length-len(out))+'e')
         else:
-            fmt = '{: .%ig}' % (length-1)
+            fmt = '{0: .%ig}' % (length-1)
             out = fmt.format(val)[:length]
             if len(out) < length:
                 pad = '0' if '.' in  out else ' '
@@ -133,7 +133,7 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
             serr = gformat(par.stderr, length=9)
 
             try:
-                spercent = '({:.2%})'.format(abs(par.stderr/par.value))
+                spercent = '({0:.2%})'.format(abs(par.stderr/par.value))
             except ZeroDivisionError:
                 spercent = ''
             sval = '%s +/-%s %s' % (sval, serr, spercent)
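
The `'{:...}'` to `'{0:...}'` changes above restore Python 2.6
compatibility, where auto-numbered format fields are not available.
For example::

    # Python 2.6 raises "ValueError: zero length field name in format"
    # for auto-numbered fields; explicit indices work on 2.6, 2.7 and 3.x:
    '{0: .5g}'.format(1234.5678)    # ' 1234.6'
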
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
index 1f8f815..60a0987 100644
--- a/lmfit/ui/basefitter.py
+++ b/lmfit/ui/basefitter.py
@@ -149,9 +149,9 @@ class BaseFitter(object):
                 val = self.kwargs[key]
                 d = {key: val}
                 guess = self.model.guess(self._data, **d)
+            self.current_params = guess
         except NotImplementedError:
             guessing_successful = False
-        self.current_params = guess
         return guessing_successful
 
     def __assign_deps(self, params):
diff --git a/requirements.txt b/requirements.txt
index fe73b4f..99f0a77 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
 numpy>=1.5
-scipy>=0.13
+scipy>=0.14
diff --git a/tests/_test_ci.py b/tests/_test_ci.py
index 86afcaf..7e0d0f3 100644
--- a/tests/_test_ci.py
+++ b/tests/_test_ci.py
@@ -13,10 +13,10 @@ def test_ci():
     p_true.add('decay', value=0.010)
 
     def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        amp = pars['amp']
+        per = pars['period']
+        shift = pars['shift']
+        decay = pars['decay']
 
         if abs(shift) > pi / 2:
             shift = shift - np.sign(shift) * pi
@@ -55,4 +55,3 @@ def test_ci():
         stderr = out.params[p].stderr
         assert(abs(diff1 - stderr) / stderr < 0.05)
         assert(abs(diff2 - stderr) / stderr < 0.05)
-
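
The recurring change in the test hunks that follow drops the explicit
`.value` lookups, relying on `Parameter` objects behaving like floats
in arithmetic (see the `__array__` additions in lmfit/parameter.py
above). The pattern, sketched with illustrative parameter names::

    import numpy as np

    def residual(pars, x, data=None):
        # formerly: amp = pars['amp'].value, etc.
        model = pars['amp'] * np.sin(x * pars['omega'] + pars['shift'])
        if data is None:
            return model
        return model - data
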
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
index 3e3d530..e7e34de 100644
--- a/tests/test_1variable.py
+++ b/tests/test_1variable.py
@@ -21,11 +21,7 @@ def linear_chisq(params, x, data, errs=None):
         msg = "No intercept parameter (c) defined in the model"
         raise KeyError(msg)
 
-    m = params["m"].value
-    c = params["c"].value
-
-    model = m*x+c
-
+    model = params["m"]*x + params["c"]
     residuals = (data-model)
     if errs is not None:
         residuals = residuals/errs
diff --git a/tests/test_algebraic_constraint.py b/tests/test_algebraic_constraint.py
index 1764b7f..e37cf15 100644
--- a/tests/test_algebraic_constraint.py
+++ b/tests/test_algebraic_constraint.py
@@ -5,14 +5,10 @@ from lmfit.printfuncs import report_fit
 
 def test_constraints1():
     def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                      pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
+        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
+        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])
+
+        model =  yg +  yl + pars['line_off'] + x * pars['line_slope']
         if data is None:
             return model
         if sigma is None:
@@ -68,14 +64,10 @@ def test_constraints1():
 def test_constraints2():
     """add a user-defined function to symbol table"""
     def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                      pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
+        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
+        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])
+
+        model =  yg +  yl + pars['line_off'] + x * pars['line_slope']
         if data is None:
             return model
         if sigma is None:
diff --git a/tests/test_algebraic_constraint2.py b/tests/test_algebraic_constraint2.py
index 45a9b6a..ab64cef 100644
--- a/tests/test_algebraic_constraint2.py
+++ b/tests/test_algebraic_constraint2.py
@@ -23,14 +23,10 @@ def test_constraints(with_plot=True):
     with_plot = with_plot and WITHPLOT
 
     def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                   pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
+        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
+        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])
+
+        model =  yg +  yl + pars['line_off'] + x * pars['line_slope']
         if data is None:
             return model
         if sigma is None:
@@ -55,12 +51,12 @@ def test_constraints(with_plot=True):
     pfit.add(name='amp_g',  value=10)
     pfit.add(name='cen_g',  value=9)
     pfit.add(name='wid_g',  value=1)
-    
+
     pfit.add(name='amp_tot',  value=20)
     pfit.add(name='amp_l',  expr='amp_tot - amp_g')
     pfit.add(name='cen_l',  expr='1.5+cen_g')
     pfit.add(name='wid_l',  expr='2*wid_g')
-    
+
     pfit.add(name='line_slope', value=0.0)
     pfit.add(name='line_off', value=0.0)
 
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
index 98b6338..f7f23bd 100644
--- a/tests/test_basicfit.py
+++ b/tests/test_basicfit.py
@@ -12,10 +12,10 @@ def test_basic():
     # define objective function: returns the array to be minimized
     def fcn2min(params, x, data):
         """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
+        amp = params['amp']
+        shift = params['shift']
+        omega = params['omega']
+        decay = params['decay']
 
         model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
         return model - data
diff --git a/tests/test_bounded_jacobian.py b/tests/test_bounded_jacobian.py
index 810a505..e88b425 100644
--- a/tests/test_bounded_jacobian.py
+++ b/tests/test_bounded_jacobian.py
@@ -14,16 +14,16 @@ def test_bounded_jacobian():
     jac_count = 0
 
     def resid(params):
-        x0 = params['x0'].value
-        x1 = params['x1'].value
+        x0 = params['x0']
+        x1 = params['x1']
         return np.array([10 * (x1 - x0*x0), 1-x0])
 
     def jac(params):
         global jac_count
         jac_count += 1
-        x0 = params['x0'].value
+        x0 = params['x0']
         return np.array([[-20*x0, 10], [-1, 0]])
-    
+
     out0 = minimize(resid, pars, Dfun=None)
 
     assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
index 99c962d..a1f5ed4 100644
--- a/tests/test_bounds.py
+++ b/tests/test_bounds.py
@@ -11,10 +11,10 @@ def test_bounds():
     p_true.add('decay', value=0.01000)
 
     def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        amp = pars['amp']
+        per = pars['period']
+        shift = pars['shift']
+        decay = pars['decay']
 
         if abs(shift) > pi/2:
             shift = shift - sign(shift)*pi
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index 2b5d290..f000d95 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -5,15 +5,10 @@ import lmfit
 from lmfit_testutils import assert_paramval
 
 def residual(params, x, data):
-    a = params['a'].value
-    b = params['b'].value
-    return data - 1.0/(a*x)+b
+    return data - 1.0/(params['a']*x)+ params['b']
 
 def residual2(params, x, data):
-    a = params['a'].value
-    b = params['b'].value
-    c = params['c'].value
-    return data - c/(a*x)+b
+    return data - params['c']/(params['a']*x)+params['b']
 
 def test_confidence1():
     x = np.linspace(0.3,10,100)
diff --git a/tests/test_copy_params.py b/tests/test_copy_params.py
index e17aa18..8841684 100644
--- a/tests/test_copy_params.py
+++ b/tests/test_copy_params.py
@@ -9,10 +9,7 @@ def get_data():
     return x, y1, y2
 
 def residual(params, x, data):
-    a = params['a'].value
-    b = params['b'].value
-
-    model = a*np.exp(b*x)
+    model = params['a']*np.exp(params['b']*x)
     return (data-model)
 
 def test_copy_params():
@@ -33,4 +30,3 @@ def test_copy_params():
 
     assert(abs(adiff) > 1.e-2)
     assert(abs(bdiff) > 1.e-2)
-
diff --git a/tests/test_bounds.py b/tests/test_least_squares.py
similarity index 77%
copy from tests/test_bounds.py
copy to tests/test_least_squares.py
index 99c962d..4195a4d 100644
--- a/tests/test_bounds.py
+++ b/tests/test_least_squares.py
@@ -1,9 +1,13 @@
-from lmfit import Parameters, minimize, fit_report
+from lmfit import Parameters, minimize, fit_report, Minimizer
+from lmfit.minimizer import HAS_LEAST_SQUARES
 from lmfit_testutils import assert_paramval, assert_paramattr
 
 from numpy import linspace, zeros, sin, exp, random, pi, sign
+import nose
 
 def test_bounds():
+    if not HAS_LEAST_SQUARES:
+        raise nose.SkipTest
     p_true = Parameters()
     p_true.add('amp', value=14.0)
     p_true.add('period', value=5.4321)
@@ -11,10 +15,10 @@ def test_bounds():
     p_true.add('decay', value=0.01000)
 
     def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        amp = pars['amp']
+        per = pars['period']
+        shift = pars['shift']
+        decay = pars['decay']
 
         if abs(shift) > pi/2:
             shift = shift - sign(shift)*pi
@@ -38,9 +42,8 @@ def test_bounds():
     fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
     fit_params.add('decay', value=0.02, max=0.10, min=0.00)
 
-    out = minimize(residual, fit_params, args=(x,), kws={'data':data})
-
-    fit = residual(out.params, x)
+    mini = Minimizer(residual, fit_params, (x, data))
+    out = mini.least_squares()
 
     assert(out.nfev  > 10)
     assert(out.nfree > 50)
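
The new test above drives the `least_squares` backend directly. A
minimal sketch of both entry points, guarded the same way the test is
(the data here is illustrative)::

    import numpy as np
    from lmfit import Minimizer, Parameters, minimize
    from lmfit.minimizer import HAS_LEAST_SQUARES

    x = np.linspace(0, 10, 51)
    data = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)

    pars = Parameters()
    pars.add('slope', value=1.0)
    pars.add('offset', value=0.0)

    def residual(p, x, data):
        return p['slope'] * x + p['offset'] - data

    if HAS_LEAST_SQUARES:   # scipy.optimize.least_squares importable
        out = Minimizer(residual, pars, fcn_args=(x, data)).least_squares()
        # equivalently, through the top-level wrapper:
        out = minimize(residual, pars, method='least_squares',
                       args=(x, data))
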
diff --git a/tests/test_model.py b/tests/test_model.py
index 9cede12..e176139 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,7 +1,7 @@
 import unittest
 import warnings
 import nose
-from numpy.testing import assert_allclose
+from numpy.testing import assert_allclose, assert_raises
 from numpy.testing.decorators import knownfailureif
 import numpy as np
 
@@ -57,12 +57,12 @@ class CommonTests(object):
         assert_results_close(result.values, self.true_values())
 
         # Pass inidividual Parameter objects as kwargs.
-        kwargs = {name: p for name, p in params.items()}
+        kwargs = dict((name, p) for name, p in params.items())
         result = self.model.fit(self.data, x=self.x, **kwargs)
         assert_results_close(result.values, self.true_values())
 
         # Pass guess values (not Parameter objects) as kwargs.
-        kwargs = {name: p.value for name, p in params.items()}
+        kwargs = dict((name, p.value) for name, p in params.items())
         result = self.model.fit(self.data, x=self.x, **kwargs)
         assert_results_close(result.values, self.true_values())
 
@@ -119,7 +119,6 @@ class CommonTests(object):
         # Check eval() output against init_fit and best_fit.
         pars = self.model.make_params(**self.guess())
         result = self.model.fit(self.data, pars, x=self.x)
-
         assert_allclose(result.eval(x=self.x, **result.values),
                         result.best_fit)
         assert_allclose(result.eval(x=self.x, **result.init_values),
@@ -132,7 +131,8 @@ class CommonTests(object):
 
         # Check that the independent variable is respected.
         short_eval = result.eval(x=np.array([0, 1, 2]), **result.values)
-        self.assertEqual(len(short_eval), 3)
+        if hasattr(short_eval, '__len__'):
+            self.assertEqual(len(short_eval), 3)
 
     def test_data_alignment(self):
         _skip_if_no_pandas()
@@ -218,7 +218,11 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
     def test_lists_become_arrays(self):
         # smoke test
         self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess())
-        self.model.fit([1, 2, None, 3], x=[1, 2, 3, 4], **self.guess())
+        assert_raises(ValueError,
+                      self.model.fit,
+                      [1, 2, None, 3],
+                      x=[1, 2, 3, 4],
+                      **self.guess())
 
     def test_missing_param_raises_error(self):
 
@@ -429,7 +433,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
 
         mx = (m1 + m2)
         params = mx.make_params()
-        param_values = {name: p.value for name, p in params.items()}
+        param_values = dict((name, p.value) for name, p in params.items())
         self.assertEqual(param_values['p1_amplitude'], 1)
         self.assertEqual(param_values['p2_amplitude'], 2)
 
@@ -448,7 +452,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
 
         m = m1 + m2
 
-        param_values = {name: p.value for name, p in params.items()}
+        param_values = dict((name, p.value) for name, p in params.items())
         self.assertTrue(param_values['m1_intercept'] < -0.0)
         self.assertEqual(param_values['m2_amplitude'], 1)
 
@@ -581,5 +585,4 @@ class TestComplexConstant(CommonTests, unittest.TestCase):
         self.guess = lambda: dict(re=2,im=2)
         self.model_constructor = models.ComplexConstantModel
         super(TestComplexConstant, self).setUp()
-
 #
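
Two behavioral changes show up in the test updates above: data
containing `None` now triggers a `ValueError` (the `None` becomes NaN
after coercion to a float array, and the default nan handling rejects
it), and `Model.eval` no longer tiles a scalar result to the shape of
the independent variable (hence the `hasattr(..., '__len__')` guard).
A sketch of the latter::

    import numpy as np
    from lmfit.models import ConstantModel

    model = ConstantModel()
    params = model.make_params(c=3.0)
    out = model.eval(params, x=np.linspace(0, 1, 5))
    # with this commit `out` is the scalar 3.0, not an array of five 3.0s
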
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
index 985a70c..5d99140 100644
--- a/tests/test_multidatasets.py
+++ b/tests/test_multidatasets.py
@@ -8,9 +8,9 @@ from lmfit.lineshapes import gaussian
 def gauss_dataset(params, i, x):
     """calc gaussian from params for data set i
     using simple, hardwired naming convention"""
-    amp = params['amp_%i' % (i+1)].value
-    cen = params['cen_%i' % (i+1)].value
-    sig = params['sig_%i' % (i+1)].value
+    amp = params['amp_%i' % (i+1)]
+    cen = params['cen_%i' % (i+1)]
+    sig = params['sig_%i' % (i+1)]
     return gaussian(x, amp, cen, sig)
 
 def objective(params, x, data):
diff --git a/tests/test_nose.py b/tests/test_nose.py
index b5ad44a..9e4f4dd 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -2,12 +2,12 @@
 from __future__ import print_function
 from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
 from lmfit.minimizer import (SCALAR_METHODS, HAS_EMCEE,
-                             MinimizerResult, _lnpost)
+                             MinimizerResult, _lnpost, _nan_policy)
 from lmfit.lineshapes import gaussian
 import numpy as np
 from numpy import pi
 from numpy.testing import (assert_, decorators, assert_raises,
-                           assert_almost_equal)
+                           assert_almost_equal, assert_equal)
 import unittest
 import nose
 from nose import SkipTest
@@ -37,10 +37,10 @@ def test_simple():
     # define objective function: returns the array to be minimized
     def fcn2min(params, x, data):
         """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
+        amp = params['amp']
+        shift = params['shift']
+        omega = params['omega']
+        decay = params['decay']
 
         model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
         return model - data
@@ -77,10 +77,10 @@ def test_lbfgsb():
     p_true.add('decay', value=0.010)
 
     def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        amp = pars['amp']
+        per = pars['period']
+        shift = pars['shift']
+        decay = pars['decay']
 
         if abs(shift) > pi/2:
             shift = shift - np.sign(shift) * pi
@@ -117,21 +117,14 @@ def test_lbfgsb():
 
 def test_derive():
     def func(pars, x, data=None):
-        a = pars['a'].value
-        b = pars['b'].value
-        c = pars['c'].value
-
-        model=a * np.exp(-b * x)+c
+        model= pars['a'] * np.exp(-pars['b'] * x) + pars['c']
         if data is None:
             return model
         return model - data
 
     def dfunc(pars, x, data=None):
-        a = pars['a'].value
-        b = pars['b'].value
-        c = pars['c'].value
-        v = np.exp(-b*x)
-        return np.array([v, -a*x*v, np.ones(len(x))])
+        v = np.exp(-pars['b']*x)
+        return np.array([v, -pars['a']*x*v, np.ones(len(x))])
 
     def f(var, x):
         return var[0]* np.exp(-var[1] * x)+var[2]
@@ -187,8 +180,8 @@ def test_derive():
 
 def test_peakfit():
     def residual(pars, x, data=None):
-        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
-        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
+        g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
+        g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
         model = g1 + g2
         if data is None:
             return model
@@ -256,10 +249,10 @@ def test_scalar_minimize_has_no_uncertainties():
     # define objective function: returns the array to be minimized
     def fcn2min(params, x, data):
         """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
+        amp = params['amp']
+        shift = params['shift']
+        omega = params['omega']
+        decay = params['decay']
 
         model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
         return model - data
@@ -301,9 +294,7 @@ def test_multidimensional_fit_GH205():
 
     def fcn2min(params, xv, yv, data):
         """ model decaying sine wave, subtract data"""
-        lambda1 = params['lambda1'].value
-        lambda2 = params['lambda2'].value
-        model = f(xv, yv, lambda1, lambda2)
+        model = f(xv, yv, params['lambda1'], params['lambda2'])
         return model - data
 
     # create a set of Parameters
@@ -345,10 +336,10 @@ class CommonMinimizerTest(unittest.TestCase):
         self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])
 
     def residual(self, pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
+        amp = pars['amp']
+        per = pars['period']
+        shift = pars['shift']
+        decay = pars['decay']
 
         if abs(shift) > pi/2:
             shift = shift - np.sign(shift) * pi
@@ -361,9 +352,14 @@ class CommonMinimizerTest(unittest.TestCase):
         # You need finite (min, max) for each parameter if you're using
         # differential_evolution.
         self.fit_params['decay'].min = -np.inf
+        self.fit_params['decay'].vary = True
         self.minimizer = 'differential_evolution'
         np.testing.assert_raises(ValueError, self.scalar_minimizer)
 
+        # but only if a parameter is not fixed
+        self.fit_params['decay'].vary = False
+        self.mini.scalar_minimize(method='differential_evolution', maxiter=1)
+
     def test_scalar_minimizers(self):
         # test all the scalar minimizers
         for method in SCALAR_METHODS:
@@ -395,6 +391,39 @@ class CommonMinimizerTest(unittest.TestCase):
                                    self.p_true.values()):
             check_wo_stderr(para, true_para.value, sig=sig)
 
+    def test_nan_policy(self):
+        # check that an error is raised if there are NaNs in
+        # the data returned by the userfcn
+        self.data[0] = np.nan
+
+        for method in SCALAR_METHODS:
+            assert_raises(ValueError,
+                          self.mini.scalar_minimize,
+                          SCALAR_METHODS[method])
+
+        assert_raises(ValueError, self.mini.minimize)
+
+        # now check that the fit proceeds if nan_policy is 'omit'
+        self.mini.nan_policy = 'omit'
+        res = self.mini.minimize()
+        assert_equal(res.ndata, np.size(self.data, 0) - 1)
+
+        for para, true_para in zip(res.params.values(),
+                                   self.p_true.values()):
+            check_wo_stderr(para, true_para.value, sig=0.15)
+
+    def test_nan_policy_function(self):
+        a = np.array([0, 1, 2, 3, np.nan])
+        assert_raises(ValueError, _nan_policy, a)
+        assert_(np.isnan(_nan_policy(a, nan_policy='propagate')[-1]))
+        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
+
+        a[-1] = np.inf
+        assert_raises(ValueError, _nan_policy, a)
+        assert_(np.isposinf(_nan_policy(a, nan_policy='propagate')[-1]))
+        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
+        assert_equal(_nan_policy(a, handle_inf=False), a)
+
     @decorators.slow
     def test_emcee(self):
         # test emcee
@@ -591,10 +620,10 @@ class CommonMinimizerTest(unittest.TestCase):
 def residual_for_multiprocessing(pars, x, data=None):
     # a residual function defined in the top level is needed for
     # multiprocessing. bound methods don't work.
-    amp = pars['amp'].value
-    per = pars['period'].value
-    shift = pars['shift'].value
-    decay = pars['decay'].value
+    amp = pars['amp']
+    per = pars['period']
+    shift = pars['shift']
+    decay = pars['decay']
 
     if abs(shift) > pi/2:
         shift = shift - np.sign(shift) * pi
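
The `test_nan_policy` additions above exercise the minimizer-level
switch. A self-contained sketch of the workflow they check (model and
values illustrative)::

    import numpy as np
    from lmfit import Minimizer, Parameters

    x = np.linspace(0, 10, 101)
    data = 3.0 * np.exp(-0.5 * x) + np.random.normal(scale=0.05, size=x.size)
    data[0] = np.nan                  # one corrupted point

    pars = Parameters()
    pars.add('amp', value=1.0)
    pars.add('rate', value=1.0, min=0.0)

    def residual(p, x, data):
        return p['amp'] * np.exp(-x * p['rate']) - data

    mini = Minimizer(residual, pars, fcn_args=(x, data))
    mini.nan_policy = 'omit'          # the default 'raise' would error out
    res = mini.minimize()
    # res.ndata == 100: the NaN point was dropped, as the test checks
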
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
index 24b1089..ebf7ff9 100644
--- a/tests/test_params_set.py
+++ b/tests/test_params_set.py
@@ -45,4 +45,4 @@ def test_param_set():
     assert(params['gamma'].vary)
     assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
 
-test_param_set()
\ No newline at end of file
+test_param_set()

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git


