[sagemath] 01/01: Add patch for scipy 0.19.1.

Tobias Hansen thansen at moszumanska.debian.org
Sun Sep 10 20:01:29 UTC 2017


This is an automated email from the git hooks/post-receive script.

thansen pushed a commit to branch master
in repository sagemath.

commit e33faccf264a6530ccfb979606b4bc81bdddcb14
Author: Tobias Hansen <thansen at debian.org>
Date:   Sun Sep 10 21:00:43 2017 +0100

    Add patch for scipy 0.19.1.
---
 debian/changelog                                   |   9 +
 debian/control.runtime-depends                     |   2 +-
 debian/patches/series                              |   1 +
 debian/patches/u0-version-scipy-0.19.1.patch       | 366 +++++++++++++++++++++
 .../patches/u2-fix-less-trivial-test-cases.patch   |  57 ----
 5 files changed, 377 insertions(+), 58 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index dbdfee3..a39b2ec 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,12 @@
+sagemath (8.0-7) UNRELEASED; urgency=medium
+
+  * Add patch u0-version-scipy-0.19.1.patch from upstream.
+  * Remove parts of u2-fix-less-trivial-test-cases.patch which are
+    included in u0-version-scipy-0.19.1.patch.
+  * Depend on scipy >= 0.19.1.
+
+ -- Tobias Hansen <thansen at debian.org>  Sun, 10 Sep 2017 20:58:23 +0100
+
 sagemath (8.0-6) unstable; urgency=medium
 
   * Use pkg-config to check for openblas|atlas in pruner, the old check
diff --git a/debian/control.runtime-depends b/debian/control.runtime-depends
index cf47732..91ff03a 100644
--- a/debian/control.runtime-depends
+++ b/debian/control.runtime-depends
@@ -82,7 +82,7 @@
  python-rpy2,
  python-sagenb (>= 1.0.1),
  python-sagenb-export (>= 3.2),
- python-scipy,
+ python-scipy (>= 0.19.1),
  python-setuptools,
  python-setuptools-scm,
  python-simplegeneric,
diff --git a/debian/patches/series b/debian/patches/series
index 75ee6c1..5986195 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,6 +1,7 @@
 # Patches already applied upstream or committed in a side branch
 # Or nearly-finished patches that they'll very probably merge.
 u0-version-cython-0.26.patch               #23360
+u0-version-scipy-0.19.1.patch              #23594
 
 # Patches that have open upstream tickets
 u1-version-pbori-boost1.62-hashes.patch    #22243
diff --git a/debian/patches/u0-version-scipy-0.19.1.patch b/debian/patches/u0-version-scipy-0.19.1.patch
new file mode 100644
index 0000000..baf5d9a
--- /dev/null
+++ b/debian/patches/u0-version-scipy-0.19.1.patch
@@ -0,0 +1,366 @@
+Bug: https://trac.sagemath.org/ticket/23594
+
+From 49485cde0a359147ac75ec5b887a6595d22e3bb0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fran=C3=A7ois=20Bissey?= <frp.bissey at gmail.com>
+Date: Tue, 8 Aug 2017 11:58:24 +1200
+Subject: Fixing easy to fix doctests
+
+---
+ src/doc/en/installation/source.rst      | 8 ++++++++
+ src/sage/coding/code_bounds.py          | 2 +-
+ src/sage/functions/exp_integral.py      | 2 +-
+ src/sage/matrix/matrix_double_dense.pyx | 4 ++--
+ 4 files changed, 12 insertions(+), 4 deletions(-)
+
+From 7c600a76e9003ead3ccfe7377a56a6b6dfb51ce5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fran=C3=A7ois=20Bissey?= <frp.bissey at gmail.com>
+Date: Tue, 8 Aug 2017 13:50:42 +1200
+Subject: Fix optimize doctest after checking that it still does what it
+ supposed to do. Results are more consistent between methods overall.
+
+---
+ src/sage/numerical/optimize.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+From cbe2690f4b4fa7bbb6cca1d263be5d90ea69f9e4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fran=C3=A7ois=20Bissey?= <frp.bissey at gmail.com>
+Date: Mon, 21 Aug 2017 11:24:31 +1200
+Subject: Use tolerance as much as possible in minimize
+
+---
+ src/sage/numerical/optimize.py | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+From 5cb42f1b8e0d6daf45078586e19a7cded50c2d9b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fran=C3=A7ois=20Bissey?= <frp.bissey at gmail.com>
+Date: Mon, 21 Aug 2017 11:45:06 +1200
+Subject: remove documentation changes meant for another ticket
+
+---
+ src/doc/en/installation/source.rst | 8 --------
+ 1 file changed, 8 deletions(-)
+
+From 88d70c68fbd6ba1fda52217b81a45c906aaf390e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fran=C3=A7ois=20Bissey?= <frp.bissey at gmail.com>
+Date: Mon, 28 Aug 2017 14:30:11 +1200
+Subject: Remove references to weave now that it is gone from scipy.
+
+---
+ .../numerical_sage/comparison_to_cython.rst        |   4 +-
+ .../thematic_tutorials/numerical_sage/ctypes.rst   |   8 +-
+ .../numerical_sage/parallel_laplace_solver.rst     |   5 +-
+ .../using_compiled_code_iteractively.rst           |   8 +-
+ .../en/thematic_tutorials/numerical_sage/weave.rst | 150 ---------------------
+ 5 files changed, 11 insertions(+), 164 deletions(-)
+ delete mode 100644 src/doc/en/thematic_tutorials/numerical_sage/weave.rst
+
+--- a/sage/src/sage/coding/code_bounds.py
++++ b/sage/src/sage/coding/code_bounds.py
+@@ -627,7 +627,7 @@
+ 
+         sage: from sage.coding.code_bounds import entropy_inverse
+         sage: entropy_inverse(0.1)
+-        0.012986862055848683
++        0.012986862055...
+         sage: entropy_inverse(1)
+         1/2
+         sage: entropy_inverse(0, 3)
+--- a/sage/src/sage/functions/exp_integral.py
++++ b/sage/src/sage/functions/exp_integral.py
+@@ -1170,7 +1170,7 @@
+ 
+         sage: f(x) = cosh_integral(x)
+         sage: find_root(f, 0.1, 1.0)
+-        0.523822571389482...
++        0.523822571389...
+ 
+     Compare ``cosh_integral(3.0)`` to the definition of the value using
+     numerical integration::
+--- a/sage/src/sage/matrix/matrix_double_dense.pyx
++++ b/sage/src/sage/matrix/matrix_double_dense.pyx
+@@ -1701,7 +1701,7 @@
+             sage: A.solve_right(b)
+             Traceback (most recent call last):
+             ...
+-            LinAlgError: singular matrix
++            LinAlgError: Matrix is singular.
+ 
+         The vector of constants needs the correct degree.  ::
+ 
+@@ -1841,7 +1841,7 @@
+             sage: A.solve_left(b)
+             Traceback (most recent call last):
+             ...
+-            LinAlgError: singular matrix
++            LinAlgError: Matrix is singular.
+ 
+         The vector of constants needs the correct degree.  ::
+ 
+--- a/sage/src/sage/numerical/optimize.py
++++ b/sage/src/sage/numerical/optimize.py
+@@ -289,14 +289,14 @@
+ 
+         sage: vars = var('x y z')
+         sage: f = 100*(y-x^2)^2+(1-x)^2+100*(z-y^2)^2+(1-y)^2
+-        sage: minimize(f, [.1,.3,.4])
+-        (1.00..., 1.00..., 1.00...)
++        sage: minimize(f, [.1,.3,.4]) # abs tol 1e-6
++        (1.0, 1.0, 1.0)
+ 
+     Try the newton-conjugate gradient method; the gradient and hessian are 
+     computed automatically::
+ 
+-        sage: minimize(f, [.1, .3, .4], algorithm="ncg")
+-        (0.9999999..., 0.999999..., 0.999999...)
++        sage: minimize(f, [.1, .3, .4], algorithm="ncg") # abs tol 1e-6
++        (1.0, 1.0, 1.0)
+ 
+     We get additional convergence information with the `verbose` option::
+ 
+@@ -309,8 +309,8 @@
+ 
+         sage: def rosen(x): # The Rosenbrock function
+         ....:    return sum(100.0r*(x[1r:]-x[:-1r]**2.0r)**2.0r + (1r-x[:-1r])**2.0r)
+-        sage: minimize(rosen, [.1,.3,.4])
+-        (1.00..., 1.00..., 1.00...)
++        sage: minimize(rosen, [.1,.3,.4]) # abs tol 3e-5
++        (1.0, 1.0, 1.0)
+ 
+     Same example with a pure Python function and a Python function to
+     compute the gradient::
+@@ -328,8 +328,8 @@
+         ....:    der[0] = -400r*x[0r]*(x[1r]-x[0r]**2r) - 2r*(1r-x[0])
+         ....:    der[-1] = 200r*(x[-1r]-x[-2r]**2r)
+         ....:    return der
+-        sage: minimize(rosen, [.1,.3,.4], gradient=rosen_der, algorithm="bfgs")
+-        (1.00...,  1.00..., 1.00...)
++        sage: minimize(rosen, [.1,.3,.4], gradient=rosen_der, algorithm="bfgs") # abs tol 1e-6
++        (1.0, 1.0, 1.0)
+     """
+     from sage.symbolic.expression import Expression
+     from sage.ext.fast_eval import fast_callable
+--- a/sage/src/doc/en/thematic_tutorials/numerical_sage/comparison_to_cython.rst
++++ b/sage/src/doc/en/thematic_tutorials/numerical_sage/comparison_to_cython.rst
+@@ -7,8 +7,8 @@
+ website you can find an example. One potential downside to Cython over
+ the previous solutions is it requires the user to understand how NumPy
+ arrays or Sage matrices are implemented so as to be able to access
+-their internal data. In contrast the weave, scipy, and ctypes examples
+-only require the user to know C or Fortran and from their perspective
++their internal data. In contrast the scipy, and ctypes examples only 
++require the user to know C or Fortran and from their perspective
+ the NumPy data magically gets passed to C or Fortran with no further
+ thought from them. In order for pyrex to be competitive as a way to
+ interactively write compiled code, the task of accessing the internal
+--- a/sage/src/doc/en/thematic_tutorials/numerical_sage/ctypes.rst
++++ b/sage/src/doc/en/thematic_tutorials/numerical_sage/ctypes.rst
+@@ -199,8 +199,8 @@
+ double. By default ctypes assumes the return values are ints. If
+ they are not you need to tell it by setting restype to the correct
+ return type. If you execute the above code, then solve(u) will
+-solve the system. It is comparable to the weave or fortran
+-solutions taking around .2 seconds. Alternatively you could do
++solve the system. It is comparable to the fortran solution taking 
++around .2 seconds. Alternatively you could do
+ 
+ ::
+ 
+@@ -209,8 +209,8 @@
+     laplace.solve_in_C(n.ctypes.data_as(c_void_p),n,n,dx,dx)
+ 
+ which computes the solution entirely in C. This is very fast.
+-Admittedly we could have had our fortran or weave routines do the
+-entire solution at the C/Fortran level and we would have the same
++Admittedly we could have had our fortran routines do the
++entire solution at the Fortran level and we would have the same
+ speed.
+ 
+ As I said earlier you can just as easily call a shared object
+--- a/sage/src/doc/en/thematic_tutorials/numerical_sage/parallel_laplace_solver.rst
++++ b/sage/src/doc/en/thematic_tutorials/numerical_sage/parallel_laplace_solver.rst
+@@ -126,6 +126,5 @@
+ takes only 6 seconds while the serial version we wrote earlier
+ takes 20 seconds.
+ 
+-Excercise: Rewrite the above using f2py or weave, so that each
+-process compiles a fortran or C timestep function and uses that,
+-how fast can you get this?
++Excercise: Rewrite the above using f2py, so that each process 
++compiles a fortran function and uses that, how fast can you get this?
+--- a/sage/src/doc/en/thematic_tutorials/numerical_sage/using_compiled_code_iteractively.rst
++++ b/sage/src/doc/en/thematic_tutorials/numerical_sage/using_compiled_code_iteractively.rst
+@@ -6,10 +6,9 @@
+ general. The exception is that these notes assume you are using Sage's
+ interface to f2py which makes it more convenient to work with f2py
+ interactively. You should look at the f2py website for information on
+-using the command line f2py tool. The ctypes and weave example will
+-work in any recent Python install (weave is not part of Python so you
+-will have to install it separately). If you are using Sage then weave,
+-ctypes, and f2py are all there already.
++using the command line f2py tool. The ctypes example will work in any 
++recent Python install. If you are using Sage, then ctypes and f2py are 
++all there already.
+ 
+ Firstly why would we want to write compiled code? Obviously, because
+ its fast, far faster than interpreted Python code.  Sage has very
+@@ -44,7 +43,6 @@
+ 
+    f2py
+    f2py_examples
+-   weave
+    ctypes
+    ctypes_examples
+    comparison_to_cython
+--- a/sage/src/doc/en/thematic_tutorials/numerical_sage/weave.rst
++++ /dev/null
+@@ -1,150 +0,0 @@
+-Weave
+-=====
+-
+-Weave is a tool that does for C/C++ what f2py does for fortran
+-(though we should note it is also possible to wrap C code using
+-f2py). Suppose we have some data stored in numpy arrays and we want
+-to write some C/C++ code to do something with that data that needs
+-to be fast. For a trivial example, let us write a function that
+-sums the contents of a numpy array
+-
+-::
+-
+-    sage: from scipy import weave
+-    doctest:...: DeprecationWarning: `scipy.weave` is deprecated, use `weave` instead!
+-    sage: from scipy.weave import converters
+-
+-::
+-
+-    def my_sum(a):
+-        n=int(len(a))
+-        code="""
+-        int i;
+-        long int counter;
+-        counter =0;
+-        for(i=0;i<n;i++)
+-        {
+-            counter=counter+a(i);
+-        }
+-        return_val=counter;
+-        """
+-
+-        err=weave.inline(code,['a','n'],type_converters=converters.blitz,compiler='gcc')
+-        return err
+-
+-To call this function do ::
+-
+-    import numpy
+-    a = numpy.array(range(60000))
+-    time my_sum(a)
+-    time sum(range(60000))
+-
+-The first time the weave code executes the code is compiled, from
+-then on, the execution is immediate. You should find that python's
+-built-in sum function is comparable in speed to what we just wrote.
+-Let us explain some things about this example. As you can see, to
+-use weave you create a string containing pure C/C++ code. Then you
+-call weave.inline on it. You pass to weave the string with the
+-code, as well as a list of python object that it is to
+-automatically convert to C variables. So in our case we can refer
+-to the python objects :math:`a` and :math:`n` inside of weave.
+-Numpy arrays are accessed by :math:`a(i)` if they are
+-one-dimensional or :math:`a(i,j)` if they are two dimensional. Of
+-course we cannot use just any python object, currently weave knows
+-about all python numerical data types such as ints and floats, as
+-well as numpy arrays. Note that numpy arrays do not become pointers
+-in the C code (which is why they are accessed by ( ) and not [ ].
+-If you need a pointer you should copy the data into a pointer. Next
+-is a more complicated example that calls lapack to solve a linear
+-system ax=b.
+-
+-::
+-
+-    def weave_solve(a,b):
+-        n = len(a[0])
+-        x = numpy.array([0]*n,dtype=float)
+-
+-        support_code="""
+-        #include <stdio.h>
+-        extern "C" {
+-        void dgesv_(int *size, int *flag,double* data,int*size,
+-                    int*perm,double*vec,int*size,int*ok);
+-        }
+-        """
+-
+-        code="""
+-            int i,j;
+-            double* a_c;
+-            double* b_c;
+-            int size;
+-            int flag;
+-            int* p;
+-            int ok;
+-            size=n;
+-            flag=1;
+-            a_c= (double *)malloc(sizeof(double)*n*n);
+-            b_c= (double *)malloc(sizeof(double)*n);
+-            p = (int*)malloc(sizeof(int)*n);
+-            for(i=0;i<n;i++)
+-               {
+-               b_c[i]=b(i);
+-               for(j=0;j<n;j++)
+-                 a_c[i*n+j]=a(i,j);
+-               }
+-            dgesv_(&size,&flag,a_c,&size,p,b_c,&size,&ok);
+-            for(i=0;i<n;i++)
+-               x(i)=b_c[i];
+-            free(a_c);
+-            free(b_c);
+-            free(p);
+-        """
+-
+-        libs=['lapack','blas','g2c']
+-        dirs=['/media/sdb1/sage-2.6.linux32bit-i686-Linux']
+-        vars = ['a','b','x','n']
+-        weave.inline(code,vars,support_code=support_code,libraries=libs,library_dirs=dirs,  \
+-        type_converters=converters.blitz,compiler='gcc')
+-        return x
+-
+-
+-Note that we have used the support_code argument which is additional C code you can
+-use to include headers and declare functions. Note that inline also can take all distutils
+-compiler options which we used here to link in lapack.
+-
+-::
+-
+-    def weaveTimeStep(u,dx,dy):
+-        """Takes a time step using inlined C code -- this version uses
+-        blitz arrays."""
+-        nx, ny = u.shape
+-        dx2, dy2 = dx**2, dy**2
+-        dnr_inv = 0.5/(dx2 + dy2)
+-
+-        code = """
+-               double tmp, err, diff,dnr_inv_;
+-               dnr_inv_=dnr_inv;
+-               err = 0.0;
+-               for (int i=1; i<nx-1; ++i) {
+-                   for (int j=1; j<ny-1; ++j) {
+-                       tmp = u(i,j);
+-                       u(i,j) = ((u(i-1,j) + u(i+1,j))*dy2 +
+-                                 (u(i,j-1) + u(i,j+1))*dx2)*dnr_inv_;
+-                       diff = u(i,j) - tmp;
+-                       err += diff*diff;
+-                   }
+-               }
+-               return_val = sqrt(err);
+-               """
+-        # compiler keyword only needed on windows with MSVC installed
+-        err = weave.inline(code, ['u', 'dx2', 'dy2', 'dnr_inv', 'nx','ny'],
+-                           type_converters = converters.blitz,
+-                           compiler = 'gcc')
+-        return u,err
+-
+-
+-Using our previous driver you should find that this version takes about the
+-same amount of time as the f2py version around .2 seconds to do 2750
+-iterations.
+-
+-For more about weave see
+-http://www.scipy.org/Weave
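
For reference, the LinAlgError doctest updates above track a message change in
scipy itself: from 0.19.1 on, solving an exactly singular system reports
"Matrix is singular." instead of "singular matrix". A minimal sketch (not part
of the commit; assumes numpy and scipy >= 0.19.1 are importable):

    # Sketch only: reproduces the message the updated doctests expect.
    import numpy as np
    from numpy.linalg import LinAlgError
    from scipy.linalg import solve

    A = np.array([[1.0, 2.0],
                  [2.0, 4.0]])   # rank 1, hence singular
    b = np.array([1.0, 1.0])

    try:
        solve(A, b)
    except LinAlgError as exc:
        print(exc)               # scipy 0.19.1 reports "Matrix is singular."
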
diff --git a/debian/patches/u2-fix-less-trivial-test-cases.patch b/debian/patches/u2-fix-less-trivial-test-cases.patch
index 76c89ef..9e2699b 100644
--- a/debian/patches/u2-fix-less-trivial-test-cases.patch
+++ b/debian/patches/u2-fix-less-trivial-test-cases.patch
@@ -4,35 +4,9 @@ Description: Fix some more test cases with less-trivial failures
 Author: Ximin Luo <infinity0 at debian.org>
 Forwarded: TODO
 ---
-src/sage/functions/exp_integral.py
-https://www.wolframalpha.com/input/?i=root+of+Chi(z)+between+0.1+and+1.0
-The overridden value is actually more correct, at least according to Wolfram
----
 src/sage/rings/integer.pyx
 Sage upstream expects MPIR's error output but we use GMP.
 They know about this difference and advise us to just override it.
----
-src/sage/numerical/optimize.py
-Probably because we use slightly different versions of various num libraries.
-doctest's wildcard "..." only matches against errors-greater-than but our
-errors-less-than are also at a level of precision that satisfies what upstream
-seems to care about (based on the number of sig-figs before the ...).
----
-src/sage/coding/code_bounds.py
-https://www.wolframalpha.com/input/?i=root+of+-xlog(x)%2Flog(2)+-+(1-x)log(1-x)%2Flog(2)+-+0.1+between+0+and+0.5
-According to Wolfram both answers are slightly wrong so just override with ours
-to reduce the noise in our test results.
---- a/sage/src/sage/functions/exp_integral.py
-+++ b/sage/src/sage/functions/exp_integral.py
-@@ -1170,7 +1170,7 @@
- 
-         sage: f(x) = cosh_integral(x)
-         sage: find_root(f, 0.1, 1.0)
--        0.523822571389482...
-+        0.5238225713898644...
- 
-     Compare ``cosh_integral(3.0)`` to the definition of the value using
-     numerical integration::
 --- a/sage/src/sage/rings/integer.pyx
 +++ b/sage/src/sage/rings/integer.pyx
 @@ -6093,8 +6093,7 @@
@@ -45,34 +19,3 @@ to reduce the noise in our test results.
          """
          cdef long n
  
---- a/sage/src/sage/numerical/optimize.py
-+++ b/sage/src/sage/numerical/optimize.py
-@@ -290,7 +290,7 @@
-         sage: vars = var('x y z')
-         sage: f = 100*(y-x^2)^2+(1-x)^2+100*(z-y^2)^2+(1-y)^2
-         sage: minimize(f, [.1,.3,.4])
--        (1.00..., 1.00..., 1.00...)
-+        (0.9999999..., 0.999999..., 0.999999...)
- 
-     Try the newton-conjugate gradient method; the gradient and hessian are 
-     computed automatically::
-@@ -329,7 +329,7 @@
-         ....:    der[-1] = 200r*(x[-1r]-x[-2r]**2r)
-         ....:    return der
-         sage: minimize(rosen, [.1,.3,.4], gradient=rosen_der, algorithm="bfgs")
--        (1.00...,  1.00..., 1.00...)
-+        (0.9999999..., 0.999999..., 0.999999...)
-     """
-     from sage.symbolic.expression import Expression
-     from sage.ext.fast_eval import fast_callable
---- a/sage/src/sage/coding/code_bounds.py
-+++ b/sage/src/sage/coding/code_bounds.py
-@@ -627,7 +627,7 @@
- 
-         sage: from sage.coding.code_bounds import entropy_inverse
-         sage: entropy_inverse(0.1)
--        0.012986862055848683
-+        0.012986862055...
-         sage: entropy_inverse(1)
-         1/2
-         sage: entropy_inverse(0, 3)
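
The minimize() doctests touched above now compare against the exact Rosenbrock
minimum (1, 1, 1) with an "# abs tol" marker instead of pinning trailing
digits, because the optimizers in scipy 0.19.1 converge to slightly different
values than earlier releases. Roughly the underlying scipy call (a sketch, not
part of the commit; assumes scipy >= 0.19.1):

    # Sketch only: scipy's own Rosenbrock helpers, compared with a tolerance
    # in the same spirit as the "# abs tol" doctest markers.
    import numpy as np
    from scipy.optimize import minimize, rosen, rosen_der

    res = minimize(rosen, [0.1, 0.3, 0.4], jac=rosen_der, method='BFGS')
    print(res.x)                                   # approximately [1. 1. 1.]
    assert np.allclose(res.x, [1.0, 1.0, 1.0], atol=1e-4)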

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/sagemath.git


