[r-cran-elliptic] 01/01: Imported Upstream version 1.3-5

Jonathon Love jon at thon.cc
Fri Aug 28 13:52:58 UTC 2015


This is an automated email from the git hooks/post-receive script.

jonathon-guest pushed a commit to branch master
in repository r-cran-elliptic.

commit d459f6de52420f76b8e79bfbb1e1e424e146c651
Author: Jonathon Love <jon at thon.cc>
Date:   Fri Aug 28 15:49:10 2015 +0200

    Imported Upstream version 1.3-5
---
 DESCRIPTION                  |   19 +
 MD5                          |   58 ++
 NAMESPACE                    |    4 +
 R/elliptic.R                 | 2102 ++++++++++++++++++++++++++++++++++++++++++
 build/vignette.rds           |  Bin 0 -> 271 bytes
 demo/00Index                 |    1 +
 demo/elliptic.R              |   87 ++
 inst/CITATION                |   28 +
 inst/doc/ellipticpaper.R     |  765 +++++++++++++++
 inst/doc/ellipticpaper.Rnw   | 1421 ++++++++++++++++++++++++++++
 inst/doc/ellipticpaper.pdf   |  Bin 0 -> 991842 bytes
 inst/doc/residuetheorem.R    |   70 ++
 inst/doc/residuetheorem.Rnw  |  162 ++++
 inst/doc/residuetheorem.pdf  |  Bin 0 -> 44878 bytes
 man/J.Rd                     |   62 ++
 man/K.fun.Rd                 |   33 +
 man/P.laurent.Rd             |   38 +
 man/WeierstrassP.Rd          |  118 +++
 man/amn.Rd                   |   26 +
 man/as.primitive.Rd          |   52 ++
 man/ck.Rd                    |   38 +
 man/congruence.Rd            |   64 ++
 man/coqueraux.Rd             |   39 +
 man/divisor.Rd               |   72 ++
 man/e16.28.1.Rd              |   38 +
 man/e18.10.9.Rd              |   42 +
 man/e1e2e3.Rd                |   99 ++
 man/elliptic-package.Rd      |  124 +++
 man/equianharmonic.Rd        |   51 +
 man/eta.Rd                   |   40 +
 man/farey.Rd                 |   40 +
 man/fpp.Rd                   |   53 ++
 man/g.fun.Rd                 |  118 +++
 man/half.periods.Rd          |   64 ++
 man/latplot.Rd               |   32 +
 man/lattice.Rd               |   19 +
 man/limit.Rd                 |   33 +
 man/massage.Rd               |   21 +
 man/misc.Rd                  |   26 +
 man/mob.Rd                   |   41 +
 man/myintegrate.Rd           |  167 ++++
 man/near.match.Rd            |   22 +
 man/newton_raphson.Rd        |   56 ++
 man/nome.Rd                  |   28 +
 man/p1.tau.Rd                |   34 +
 man/parameters.Rd            |  103 +++
 man/pari.Rd                  |   53 ++
 man/sn.Rd                    |  113 +++
 man/sqrti.Rd                 |   20 +
 man/theta.Rd                 |   92 ++
 man/theta.neville.Rd         |   77 ++
 man/theta1.dash.zero.Rd      |   31 +
 man/theta1dash.Rd            |   44 +
 man/unimodular.Rd            |   47 +
 man/view.Rd                  |  123 +++
 tests/aaa.R                  |  207 +++++
 vignettes/elliptic.bib       |  199 ++++
 vignettes/ellipticpaper.Rnw  | 1421 ++++++++++++++++++++++++++++
 vignettes/residuetheorem.Rnw |  162 ++++
 59 files changed, 9099 insertions(+)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..182cbce
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,19 @@
+Package: elliptic
+Version: 1.3-5
+Date: 2008-03-23
+Title: elliptic functions
+Author: Robin K. S. Hankin
+Depends: R (>= 2.5.0)
+Imports: MASS
+Suggests: emulator, calibrator
+SystemRequirements: pari/gp
+Description: 
+ A suite of elliptic and related functions including Weierstrass and
+ Jacobi forms.  Also includes various tools for manipulating and
+ visualizing complex functions.
+Maintainer: Robin K. S. Hankin <hankin.robin at gmail.com>
+License: GPL-2
+Packaged: 2014-11-05 20:32:42 UTC; rhankin
+NeedsCompilation: no
+Repository: CRAN
+Date/Publication: 2014-11-06 00:39:37
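As a quick orientation to the functionality this DESCRIPTION advertises, here is a minimal R sketch; the argument values are purely illustrative and it assumes the package is installed under its CRAN name "elliptic":

    library(elliptic)
    K.fun(0.5)                            # complete elliptic integral K(m), computed by the AGM iteration
    sn(1 + 1i, m = 0.5)                   # Jacobi sn as a ratio of Neville theta functions
    P(1 + 1i, params = equianharmonic())  # Weierstrass P on the equianharmonic lattice (g2 = 0, g3 = 1)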
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..3e3c687
--- /dev/null
+++ b/MD5
@@ -0,0 +1,58 @@
+d14124ae2fee6b3bd83c70922d844c51 *DESCRIPTION
+58d30ed543a431a90d2f5d4ea8f2d33c *NAMESPACE
+e8822a203d9e41d870b0d4ae24c66df2 *R/elliptic.R
+1602e86e4650b2b639d5dc4caf9741c7 *build/vignette.rds
+33676cd6d88cb688d9fed000bf4fda10 *demo/00Index
+2fad1475ef433f55b12f747f376de850 *demo/elliptic.R
+49042829f5eebfb2383ec97a417361d6 *inst/CITATION
+05d333c134743eca94eb3ccb6e7c4b36 *inst/doc/ellipticpaper.R
+96a9055539a08c9d2a55d5b713c91dad *inst/doc/ellipticpaper.Rnw
+0422074a8e98c5b5bf7fe21517c0fcbd *inst/doc/ellipticpaper.pdf
+2a386924859cb939fbc55ac4d15e33d1 *inst/doc/residuetheorem.R
+49328f4f870d3669d140832cb1af006b *inst/doc/residuetheorem.Rnw
+e335188c4bf71dfcc0f10d8534a30318 *inst/doc/residuetheorem.pdf
+6d57317c47182f7e6c6990c9fefecc7c *man/J.Rd
+4ee35fa06bff45519dbbfc858b069379 *man/K.fun.Rd
+4f68fe7f2efb36db1c00787c5ee06306 *man/P.laurent.Rd
+92aa569a7274b89e406102111bcf0f97 *man/WeierstrassP.Rd
+0c70ae3f4c994da774dc7e1460e25246 *man/amn.Rd
+03b1c420f796fcb5bc2f2646187b1174 *man/as.primitive.Rd
+fc61699b95b6cb2e24336e1ef926c0f0 *man/ck.Rd
+4e2c641979fc70ecf00dc8d10a9ac857 *man/congruence.Rd
+e66543f5901fb5829098f41382e1a640 *man/coqueraux.Rd
+bc6567cdef8cf6e1d85b75f2c0ba07c1 *man/divisor.Rd
+22fe69063992cc46e90cb54342500037 *man/e16.28.1.Rd
+986b16988ddbe8bbeb4c559af6f58280 *man/e18.10.9.Rd
+c22f2413c9fab7a1c82e344f8b940d24 *man/e1e2e3.Rd
+ab7398394a2eb0f3b5ad71f52ac67812 *man/elliptic-package.Rd
+26bcf9ff3a24afb73d893a1dc820925d *man/equianharmonic.Rd
+b33b6f04011d078657b134cfb9af0d3c *man/eta.Rd
+57ca0330ed43977c1fb587aa0d952b9a *man/farey.Rd
+4712c1d07fa004df5504135dcc13366d *man/fpp.Rd
+08a72a6f14dff065ff1a6e3528122bfc *man/g.fun.Rd
+2b4ddd87f996c588b7390477c4b0b4b9 *man/half.periods.Rd
+1f1de93b8af52454e56fc7e092525915 *man/latplot.Rd
+07b5efd49ab27cae2d7e61f891f7c591 *man/lattice.Rd
+7cfda4454430f8bf841c8093b814cee8 *man/limit.Rd
+05e24c3ee5d67d05e8fe5a82f1bc0b10 *man/massage.Rd
+4f28e593419428ed467140b3621d32ad *man/misc.Rd
+e3ad7db60e2f2abe946fdea47b407d77 *man/mob.Rd
+4e47b2e0c4885d695bec019ad432100c *man/myintegrate.Rd
+cc8872603a253c670698ccf76de64218 *man/near.match.Rd
+2689f116ecd4704d232ab3f74b1146dd *man/newton_raphson.Rd
+3eab2d18d2f0cf0b6cedaecbc318a9a8 *man/nome.Rd
+cbb16bc761a9e8550c9265dcdf5003aa *man/p1.tau.Rd
+7cd372b496880dd04f7063b229a82c5f *man/parameters.Rd
+4b5297191e263a270b95ae339b17e027 *man/pari.Rd
+be5220b133aca94a183085a57dd9036d *man/sn.Rd
+1f3f2d56ab9cad1c711f1ed54670a4ca *man/sqrti.Rd
+b5d9a73e37ef7e670b7157028db01fba *man/theta.Rd
+9a0da24113872e56258764cd54b2ec2a *man/theta.neville.Rd
+a46d76bfbc13910c163eaae692077cbe *man/theta1.dash.zero.Rd
+e328a677293830dcb5d2754adc7be591 *man/theta1dash.Rd
+b71334c1419c03a1329b78ef16874c4d *man/unimodular.Rd
+a2f1f5a7fcb802ca446266c52af52a5f *man/view.Rd
+494e8a6e3ef679beb5bbcfb50683aa22 *tests/aaa.R
+5f61069431448041d4324974f1a32bae *vignettes/elliptic.bib
+96a9055539a08c9d2a55d5b713c91dad *vignettes/ellipticpaper.Rnw
+49328f4f870d3669d140832cb1af006b *vignettes/residuetheorem.Rnw
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..2310c07
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,4 @@
+exportPattern("^[[:alpha:]]+")
+exportPattern("%mob%")
+importFrom(MASS,fractions)
+importFrom(MASS,rational)
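The second exportPattern() line above exposes the binary operator %mob%, defined further down in R/elliptic.R as the Moebius map (M[1]*x + M[3])/(M[2]*x + M[4]). A minimal sketch of its use (matrix entries chosen arbitrarily for illustration):

    library(elliptic)
    M <- matrix(c(1, 1, 1, 2), 2, 2)  # column-major, so M[1]=1, M[2]=1, M[3]=1, M[4]=2
    M %mob% (0 + 1i)                  # same as mob(M, 0 + 1i), i.e. (1*(0+1i) + 1)/(1*(0+1i) + 2)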
diff --git a/R/elliptic.R b/R/elliptic.R
new file mode 100644
index 0000000..3dd84a0
--- /dev/null
+++ b/R/elliptic.R
@@ -0,0 +1,2102 @@
+"amn" <-
+function (u) 
+{
+    "asub" <- function(m, n) {
+        if ((m < 0) | (n < 0)) {
+            return(0)
+        }
+        else {
+            return(a[n + 1, m + 1])
+        }
+    }
+    index <- cbind(unlist(sapply(1:u, function(i) {
+        1:i
+    })), unlist(sapply(1:u, function(i) {
+        i:1
+    })))
+    a <- matrix(0, u, u)
+    a[1, 1] <- 1
+    for (i in 2:nrow(index)) {
+        m <- index[i, 2] - 1
+        n <- index[i, 1] - 1
+        a[index[i, , drop = FALSE]] <- 3 * (m + 1) * asub(m + 
+            1, n - 1) + 16/3 * (n + 1) * asub(m - 2, n + 1) - 
+            1/3 * (2 * m + 3 * n - 1) * (4 * m + 6 * n - 1) * 
+                asub(m - 1, n)
+    }
+    return(a)
+}
+"as.primitive" <-
+function (p, n = 3, tol = 1e-05, give.answers = FALSE) 
+{
+    if (class(p) == "primitive") {
+        return(p)
+    }
+    tau <- p[2]/p[1]
+    if (Im(tau) == 0) {
+        stop("period ratio real")
+    }
+    jj <- as.matrix(expand.grid(0:n, n:-n))[-n * (n + 1) - 1, 
+        ]
+    magnitudes <- abs(jj %*% p)
+    o <- order(magnitudes, abs(jj[, 1] - 1), abs(jj[, 2]))
+    first.row <- jj[o[1], ]
+    found.second.row <- FALSE
+    for (i in 2:nrow(jj)) {
+        second.row.candidate <- jj[o[i], ]
+        jj.matrix <- cbind(first.row, second.row.candidate)
+        if (abs(det(jj.matrix)) > tol) {
+            second.row <- second.row.candidate
+            found.second.row <- TRUE
+            break()
+        }
+    }
+    if (!found.second.row) {
+        stop("unimodular transformation out of range.  Try increaing n")
+    }
+    M <- rbind(first.row, second.row)
+    p.prim <- M %*% p
+    if (Re(p.prim[1]) < 0) {
+        p.prim[1] <- -p.prim[1]
+        M[1, ] <- -M[1, ]
+    }
+    if (Im(p.prim[2]/p.prim[1]) < -tol) {
+        M[2, ] <- -M[2, ]
+    }
+    out <- as.vector(M %*% p)
+    class(out) <- "primitive"
+    if (give.answers) {
+        return(list(M = M, p = out, mags = abs(out)))
+    }
+    else {
+        return(out)
+    }
+}
+"cc" <-
+function (u, m, ...) 
+{
+    1
+}
+"cd" <-
+function (u, m, ...) 
+{
+    theta.c(u, m = m, ...)/theta.d(u, m = m, ...)
+}
+"ck" <-
+function (g, n = 20) 
+{
+    if (n < 3) {
+        stop("error: n must be >3")
+    }
+    out <- rep(0, n)
+    g2 <- g[1]
+    g3 <- g[2]
+    out[1] <- 0
+    out[2] <- g2/20
+    out[3] <- g3/28
+    for (k in 4:n) {
+        out[k] <- 3/((2 * k + 1) * (k - 3)) * sum(out[2:(k - 
+            2)] * out[(k - 2):2])
+    }
+    return(out)
+}
+"cn" <-
+function (u, m, ...) 
+{
+    theta.c(u, m = m, ...)/theta.n(u, m = m, ...)
+}
+"congruence" <-
+function (a, l = 1) 
+{
+    l <- as.integer(l)
+    m <- as.integer(a[1])
+    n <- as.integer(a[2])
+    zero <- as.integer(0)
+    one <- as.integer(1)
+    if (m == zero & n == one) {
+        return(NULL)
+    }
+    if (m == one & n == zero) {
+        return(c(NA, 1))
+    }
+    if (m == 1) {
+        return(rbind(c(a),c(1, n + l), c(0, l)))
+    }
+    if (n == 1) {
+        return(rbind(c(a),c(m - l, 1), c(l, 0)))
+    }
+    q1 <- which((+l + (1:m) * n)%%m == zero)
+    if (!any(q1)) {
+        q1 <- NA
+    }
+    q2 <- which((-l + (1:n) * m)%%n == zero)
+    if (!any(q2)) {
+        q2 <- NA
+    }
+    out <- rbind(a, cbind(q1, q2))
+    rownames(out) <- NULL
+    colnames(out) <- NULL
+    return(out)
+}
+"coqueraux" <-
+function (z, g, N = 5, use.fpp = FALSE, give = FALSE) 
+{
+    if (use.fpp) {
+        if (class(g) != "parameters") {
+            g <- parameters(g = g)
+            g2 <- g$g[1]
+            g3 <- g$g[2]
+        }
+        z <- fpp(z, 2 * g$Omega)
+    }
+    if (class(g) != "parameters") {
+        g2 <- g[1]
+        g3 <- g[2]
+    }
+    else {
+        g2 <- g$g[1]
+        g3 <- g$g[2]
+    }
+    z0 <- z/2^N
+    z0.sq <- z0^2
+    out <- 1/z0.sq + z0.sq * (g2/20 + z0.sq * g3/28)
+    for (i in 1:N) {
+        out <- -2 * out + (6 * out^2 - g2/2)^2/(4 * (4 * out^3 - 
+            g2 * out - g3))
+    }
+    if (give) {
+        error <- abs(g2^2)/2450/2^(8 * N) * abs(z)^9 * sqrt(abs(4 * 
+            out^3 - g2 * out - g3))
+        return(list(out = out, error = error))
+    }
+    else {
+        return(out)
+    }
+}
+"cs" <-
+function (u, m, ...) 
+{
+    theta.c(u, m = m, ...)/theta.s(u, m = m, ...)
+}
+"dc" <-
+function (u, m, ...) 
+{
+    theta.d(u, m = m, ...)/theta.c(u, m = m, ...)
+}
+"dd" <-
+function (u, m, ...) 
+{
+    theta.d(u, m = m, ...)/theta.d(u, m = m, ...)
+}
+"divisor" <-
+function (n, k = 1) 
+{
+    if (length(n) > 1) {
+        return(sapply(n, match.fun(sys.call()[[1]]), k = k))
+    }
+    if (n == 1) {
+        return(1)
+    }
+    x <- factorize(n)
+    xx <- unique(x)
+    jj <- rbind(vals = xx, cnts = tabulate(match(x, xx)))
+    f <- function(u) {
+        p <- u[1]
+        alpha <- u[2]
+        (p^((alpha + 1) * k) - 1)/(p^k - 1)
+    }
+    if (k > 0) {
+        return(as.integer(prod(apply(jj, 2, f))))
+    }
+    else {
+        return(as.integer(prod(1 + jj[2, ])))
+    }
+}
+
+"liouville" <- function(n){
+out <- ifelse(sapply(factorize(n), length)%%2, -1L, 1L)
+out[n==1] <- 1L
+return(out)
+}
+  
+"dn" <-
+function (u, m, ...) 
+{
+    theta.d(u, m = m, ...)/theta.n(u, m = m, ...)
+}
+"ds" <-
+function (u, m, ...) 
+{
+    theta.d(u, m = m, ...)/theta.s(u, m = m, ...)
+}
+"e16.28.1" <-
+function (z, m, ...) 
+{
+    theta1(z, m = m, ...)^2 * theta4(0, m = m, ...)^2 - theta3(z, 
+        m = m, ...)^2 * theta2(0, m = m, ...)^2 + theta2(z, m = m, 
+        ...)^2 * theta3(0, m = m, ...)^2
+}
+"e16.28.2" <-
+function (z, m, ...) 
+{
+    theta2(z, m = m, ...)^2 * theta4(0, m = m, ...)^2 - theta4(z, 
+        m = m, ...)^2 * theta2(0, m = m, ...)^2 + theta1(z, m = m, 
+        ...)^2 * theta3(0, m = m, ...)^2
+}
+"e16.28.3" <-
+function (z, m, ...) 
+{
+    theta3(z, m = m, ...)^2 * theta4(0, m = m, ...)^2 - theta4(z, 
+        m = m, ...)^2 * theta3(0, m = m, ...)^2 + theta1(z, m = m, 
+        ...)^2 * theta2(0, m = m, ...)^2
+}
+"e16.28.4" <-
+function (z, m, ...) 
+{
+    theta4(z, m = m, ...)^2 * theta4(0, m = m, ...)^2 - theta3(z, 
+        m = m, ...)^2 * theta3(0, m = m, ...)^2 + theta2(z, m = m, 
+        ...)^2 * theta2(0, m = m, ...)^2
+}
+"e16.28.5" <-
+function (m, ...) 
+{
+    theta2(0, m = m, ...)^4 + theta4(0, m = m, ...)^4 - theta3(0, 
+        m = m, ...)^4
+}
+"e16.36.6a" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    2 * K * theta1(v, m = m, ...)/(pi * theta2(0, m = m, ...) * 
+        theta3(0, m = m, ...) * theta4(0, m = m, ...))
+}
+"e16.36.6b" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    theta2(v, m = m, ...)/theta2(0, m = m, ...)
+}
+"e16.36.7a" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    theta3(v, m = m, ...)/theta3(0, m = m, ...)
+}
+"e16.36.7b" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    theta4(v, m = m, ...)/theta4(0, m = m, ...)
+}
+"e16.37.1" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 1
+    for (n in 1:maxiter) {
+        out.new <- out * (1 - 2 * q^(2 * n) * cos(2 * v) + q^(4 * 
+            n))
+        if (near.match(out, out.new)) {
+            return((16 * q/(m * (1 - m)))^(1/6) * sin(v) * out)
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.37.2" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 1
+    for (n in 1:maxiter) {
+        out.new <- out * (1 + 2 * q^(2 * n) * cos(2 * v) + q^(4 * 
+            n))
+        if (near.match(out, out.new)) {
+            return((16 * q * sqrti(1 - m)/m)^(1/6) * cos(v) * 
+                out)
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.37.3" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 1
+    for (n in 1:maxiter) {
+        out.new <- out * (1 + 2 * q^(2 * n - 1) * cos(2 * v) + 
+            q^(4 * n - 2))
+        if (near.match(out, out.new)) {
+            return((m * (1 - m)/(16 * q))^(1/12) * out)
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.37.4" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 1
+    for (n in 1:maxiter) {
+        out.new <- out * (1 - 2 * q^(2 * n - 1) * cos(2 * v) + 
+            q^(4 * n - 2))
+        if (near.match(out, out.new)) {
+            return((m/(16 * q * (1 - m)^2))^(1/12) * out)
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.38.1" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + (-1)^n * q^(n * (n + 1)) * sin((2 * 
+            n + 1) * v)
+        if (near.match(out, out.new)) {
+            return(out.new * sqrt(2 * pi * sqrt(q)/(sqrti(m) * 
+                sqrti(1 - m) * K)))
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.38.2" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + q^(n * (n + 1)) * cos((2 * n + 1) * 
+            v)
+        if (near.match(out, out.new)) {
+            return(out.new * sqrt(2 * pi * sqrti(q)/(sqrti(m) * 
+                K)))
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.38.3" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + q^(n * n) * cos(2 * n * v)
+        if (near.match(out, out.new)) {
+            return((1 + 2 * out.new) * sqrt(pi/(2 * K)))
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e16.38.4" <-
+function (u, m, maxiter = 30) 
+{
+    q <- nome(m)
+    K <- K.fun(m)
+    v <- pi * u/(2 * K)
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + (-1)^n * q^(n * n) * cos(2 * n * v)
+        if (near.match(out, out.new)) {
+            return((1 + 2 * out.new) * sqrt(pi/(2 * sqrti(1 - 
+                m) * K)))
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"e18.10.9" <-
+function (parameters) 
+{
+    Omega <- parameters$Omega
+    e <- parameters$e
+    out <- rep(NA, 3)
+    q <- exp((1i) * pi * Omega[2]/Omega[1])
+    out[1] <- 12 * Omega[1]^2 * e[1] - pi^2 * (theta3(z = 0, 
+        q = q)^4 + theta4(z = 0, q = q)^4)
+    out[2] <- 12 * Omega[1]^2 * e[2] - pi^2 * (theta2(z = 0, 
+        q = q)^4 - theta4(z = 0, q = q)^4)
+    out[3] <- 12 * Omega[1]^2 * e[3] + pi^2 * (theta2(z = 0, 
+        q = q)^4 + theta3(z = 0, q = q)^4)
+    return(out)
+}
+"e1e2e3" <-
+function (g, use.laurent = TRUE, AnS = is.double(g), Omega = NULL, 
+    tol = 1e-06) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    e <- polyroot(c(-g3, -g2, 0, 4))
+    if (AnS) {
+        if (!is.double(g)) {
+            stop("AnS only consider real g")
+        }
+        Delta <- g2^3 - 27 * g3^2
+        if (Delta < 0) {
+            pos.im <- which.max(Im(e))
+            neg.im <- which.min(Im(e))
+            e <- c(e[pos.im], e[-c(pos.im, neg.im)], e[neg.im])
+            e <- massage(e)
+        }
+        else {
+            e <- sort(e, decreasing = TRUE)
+            e <- Re(e)
+        }
+        names(e) <- c("e1", "e2", "e3")
+        return(e)
+    }
+    out.accurate <- e
+    if (use.laurent) {
+        e.approx <- rep(NA, 3)
+        out <- e.approx
+        if (is.null(Omega)) {
+            Omega <- half.periods(e = e)
+        }
+        if (AnS) {
+            e.approx[1] <- P.laurent(z = Omega[1], g = g, tol = tol)
+            e.approx[2] <- P.laurent(z = Omega[2], g = g, tol = tol)
+            e.approx[3] <- P.laurent(z = fpp(sum(Omega), Omega * 
+                2), g = g, tol = tol)
+        }
+        else {
+            e.approx[1] <- coqueraux(z = Omega[1], g = g)
+            e.approx[3] <- coqueraux(z = Omega[2], g = g)
+            e.approx[2] <- -e.approx[1] - e.approx[3]
+        }
+        out[1] <- out.accurate[which.min(abs(e - e.approx[1]))]
+        out[2] <- out.accurate[which.min(abs(e - e.approx[2]))]
+        out[3] <- out.accurate[which.min(abs(e - e.approx[3]))]
+    }
+    names(out) <- c("e1", "e2", "e3")
+    return(out)
+}
+"eee.cardano" <-
+function (g) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    Delta <- g3^2 - g2^3/27
+    epsilon <- exp(pi * (0+2i)/3)
+    if (g2 == 0) {
+        e1 <- (g3/4)^(1/3)
+        return(c(e1 = e1, e2 = epsilon * e1, e3 = e1/epsilon))
+    }
+    if (g3 == 0) {
+        e1 <- g2^(1/2)/2
+        return(c(e1 = e1, e2 = -e1, e3 = 0))
+    }
+    alpha <- (g2/12)^(1/2)
+    beta <- (g3 + sqrt(Delta))^(1/3)/(2 * alpha)
+    gamma <- 1/beta
+    e1 <- alpha * (beta + gamma)
+    e2 <- alpha * (epsilon * beta + gamma/epsilon)
+    e3 <- alpha * (1/epsilon * beta + gamma * epsilon)
+    return(c(e1, e2, e3))
+}
+"equianharmonic" <-
+function (...) 
+{
+    jj <- gamma(1/3)^3/(4 * pi)
+    omega1 <- jj/2 - (1i) * jj * sqrt(3)/2
+    omega2 <- Conj(omega1)
+    Omega <- c(omega1, omega2)
+    epsilon <- exp(pi * (1i)/3)
+    e <- c(4^(-1/3) * epsilon^2, 4^(-1/3), 4^(-1/3) * epsilon^(-2))
+    names(e) <- c("e1", "e2", "e3")
+    eta <- epsilon * pi/(2 * omega2 * sqrt(3))
+    etadash <- -epsilon^(-1) * pi/(2 * omega2 * sqrt(3))
+    Eta <- c(etadash, eta - etadash, -eta)
+    out <- list(Omega = Omega, q = exp(pi * (1i) * omega2/omega1), 
+        e = e, g = c(g2 = 0, g3 = 1), Delta = -27, Eta = Eta, 
+        is.AnS = TRUE, given = "d")
+    class(out) <- "parameters"
+    return(out)
+}
+"eta" <-
+function (z, ...) 
+{
+    f <- function(u) {
+        q <- exp(pi * (1i) * u * 3)
+        return(theta3((1/2 + u/2) * pi, q = q, ...))
+    }
+    out <- sapply(z, f) * exp(pi * (1i) * z/12)
+    attributes(out) <- attributes(z)
+    return(out)
+}
+"eta.series" <-
+function (z, maxiter = 300) 
+{
+    jj <- 1
+    for (n in 1:maxiter) {
+        jj <- jj * (1 - exp(2 * pi * (1i) * n * z))
+    }
+    return(exp(pi * (1i) * z/12) * jj)
+}
+"factorize" <-
+function (n) 
+{
+    if (!is.numeric(n)) 
+        stop("cannot factorize non-numeric arguments")
+    if (length(n) > 1) {
+        l <- list()
+        for (i in seq(along = n)) l[[i]] <- Recall(n[i])
+        return(l)
+    }
+    if (n != round(n) || n < 2) 
+        return(n)
+    tab <- primes(n)
+    fac <- numeric(0)
+    while (length(tab <- tab[n%%tab == 0]) > 0) {
+        n <- n/prod(tab)
+        fac <- c(fac, tab)
+    }
+    as.integer(sort(fac))
+}
+"farey" <-
+function (n, print = FALSE, give.series = FALSE) 
+{
+    a <- outer(0:n, 0:n, "/")
+    a <- as.vector(a[is.finite(a) & a <= 1])
+    a <- MASS::fractions(unique(MASS::rational(sort(a))))
+    a <- attributes(a)$fracs
+    a[1] <- "0/1"
+    a[length(a)] <- "1/1"
+    if (print) {
+        print(a)
+        return(invisible(a))
+    }
+    a <- strsplit(a, "/")
+    a <- do.call("rbind", a)
+    mode(a) <- "integer"
+    if (give.series) {
+        colnames(a) <- c("num", "den")
+        return(a)
+    }
+    if (n == 0) {
+        return(c(0, 1, 1, 0))
+    }
+    if (n == 1) {
+        return(c(0, 1, 1, 1))
+    }
+    r <- nrow(a)
+    a <- a[c(1, rep(2:(r - 1), each = 2), r), ]
+    a <- as.vector(t(a))
+    dim(a) <- c(4, r - 1)
+    return(a)
+}
+"fpp" <-
+function (z, p, give = FALSE) 
+{
+    attr <- attributes(z)
+    z <- as.vector(z)
+    jj.mn <- round(mn(z, p))
+    z <- z - jj.mn %*% p
+    attributes(z) <- attr
+    if (give) {
+        return(list(answer = z, mn = jj.mn))
+    }
+    else {
+        return(z)
+    }
+}
+"g2.fun" <-
+function (b, use.first = TRUE, ...) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    jj2 <- theta2(0, q = q, ...)
+    jj3 <- theta3(0, q = q, ...)
+    jj4 <- theta4(0, q = q, ...)
+    if (use.first) {
+        return((1/12) * (pi/p1)^4 * (jj2^8 - jj3^4 * jj2^4 + 
+            jj3^8))
+    }
+    else {
+        return((2/3) * (pi/(2 * p1))^4 * (jj2^8 + jj3^8 + jj4^8))
+    }
+}
+"g2.fun.direct" <-
+function (b, nmax = 50, tol = 1e-10) 
+{
+    if (length(b) == 1) {
+        b <- c(1, b)
+    }
+    jj <- expand.grid(-nmax:nmax, -nmax:nmax)
+    jj <- jj[-(2 * nmax * nmax + 2 * nmax + 1), ]
+    jj <- as.matrix(jj)
+    return(massage(60 * sum(1/(2 * jj %*% b)^4), tol = tol))
+}
+"g2.fun.divisor" <-
+function (b, nmax = 50, strict = TRUE, tol = 1e-10) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    s <- 0
+    for (n in 1:nmax) {
+        s.new <- s + divisor(n, 3) * q^(2 * n)
+        if (near.match(s, s.new)) {
+            return(massage((pi/p1)^4 * (1/12 + 20 * s), tol = tol))
+        }
+        s <- s.new
+    }
+    if (strict) {
+        stop("series not converged")
+    }
+    else {
+        warning("series not converged.  Partial sum returned")
+        return(massage((pi/p1)^4 * (1/12 + 20 * s), tol = tol))
+    }
+}
+"g2.fun.fixed" <-
+function (b, nmax = 50, tol=1e-10, give = FALSE) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    jj <- 1:nmax
+    ee <- q^(2 * jj)
+    out <- jj^3 * ee/(1 - ee)
+    if (give) {
+        out <- cumsum(out)
+    }
+    else {
+        out <- sum(out)
+    }
+    return(massage((pi/p1)^4 * (1/12 + 20 * out), tol = tol))
+}
+"g2.fun.lambert" <-
+function (b, nmax = 50, tol = 1e-10, strict = TRUE) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q.sq <- exp(2 * pi * (1i) * tau)
+    s <- 0
+    q.sq.power.n <- q.sq
+    for (n in 1:nmax) {
+        s.new <- s + n^3 * q.sq.power.n/(1 - q.sq.power.n)
+        if (near.match(s, s.new)) {
+            return(massage((pi/p1)^4 * (1/12 + 20 * s), tol = tol))
+        }
+        s <- s.new
+        q.sq.power.n <- q.sq.power.n * q.sq
+    }
+    if (strict) {
+        stop("series not converged")
+    }
+    else {
+        warning("series not converged.  Partial sum returned")
+        return(massage((pi/p1)^4 * (1/12 + 20 * s), tol = tol))
+    }
+}
+"g2.fun.vectorized" <-
+function (b, nmax = 50, tol = 1e-10, give = FALSE) 
+{
+    if (is.vector(b)) {
+        p1 <- rep(1, length(b))
+        tau <- b
+    }
+    else {
+        p1 <- b[, 1]
+        tau <- b[, 2]/p1
+    }
+    if (any(Im(tau) < 0)) {
+        stop("Im(tau)<0")
+    }
+    q2 <- exp(pi * (0+2i) * tau)
+    jj <- 1:nmax
+    out <- outer(jj, q2, function(n, q) {
+        n^3 * q^n/(1 - q^n)
+    })
+    if (give) {
+        out <- apply(out, 2, cumsum)
+    }
+    else {
+        out <- apply(out, 2, sum)
+    }
+    return(massage((pi/p1)^4 * (1/12 + 20 * out), tol = tol))
+}
+"g3.fun" <-
+function (b, use.first = TRUE, ...) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    jj2 <- theta2(0, q = q, ...)^4
+    jj3 <- theta3(0, q = q, ...)^4
+    jj4 <- theta4(0, q = q, ...)^4
+    if (use.first) {
+        return((pi/(2 * p1))^6 * ((8/27) * (jj2^3 + jj3^3) - 
+            (4/9) * (jj2 + jj3) * jj2 * jj3))
+    }
+    else {
+        return((4/27) * (pi/(2 * p1))^6 * (jj2 + jj3) * (jj3 + 
+            jj4) * (jj4 - jj2))
+    }
+}
+"g3.fun.direct" <-
+function (b, nmax = 50, tol = 1e-10) 
+{
+    if (length(b) == 1) {
+        b <- c(1, b)
+    }
+    jj <- expand.grid(-nmax:nmax, -nmax:nmax)
+    jj <- jj[-(2 * nmax * nmax + 2 * nmax + 1), ]
+    jj <- as.matrix(jj)
+    return(massage(140 * sum(1/(2 * jj %*% b)^6), tol = tol))
+}
+"g3.fun.divisor" <-
+function (b, nmax = 50, strict = TRUE, tol = 1e-10) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    s <- 0
+    for (n in 1:nmax) {
+        s.new <- s + divisor(n, 5) * q^(2 * n)
+        if (near.match(s, s.new)) {
+            return(massage((pi/p1)^6 * (1/216 - 7/3 * s), tol = tol))
+        }
+        s <- s.new
+    }
+    if (strict) {
+        stop("series not converged")
+    }
+    else {
+        warning("series not converged.  Partial sum returned")
+        return(massage((pi/p1)^6 * (1/216 - 7/3 * s), tol = tol))
+    }
+}
+"g3.fun.fixed" <-
+function (b, nmax = 50, tol = 1e-10, give = FALSE) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q <- exp(pi * (1i) * tau)
+    jj <- 1:nmax
+    ee <- q^(2 * jj)
+    out <- jj^5 * ee/(1 - ee)
+    if (give) {
+        out <- cumsum(out)
+    }
+    else {
+        out <- sum(out)
+    }
+    return(massage((pi/p1)^6 * (1/216 - 7/3 * out), tol = tol))
+}
+"g3.fun.lambert" <-
+function (b, nmax = 50, tol = 1e-10, strict = TRUE) 
+{
+    jj <- p1.tau(b)
+    p1 <- jj$p1
+    tau <- jj$tau
+    q.sq <- exp(2 * pi * (1i) * tau)
+    s <- 0
+    q.sq.power.n <- q.sq
+    for (n in 1:nmax) {
+        s.new <- s + n^5 * q.sq.power.n/(1 - q.sq.power.n)
+        if (near.match(s, s.new)) {
+            return(massage((pi/p1)^6 * (1/216 - 7/3 * s), tol = tol))
+        }
+        s <- s.new
+        q.sq.power.n <- q.sq.power.n * q.sq
+    }
+    if (strict) {
+        stop("series not converged")
+    }
+    else {
+        warning("series not converged.  Partial sum returned")
+        return(massage((pi/p1)^6 * (1/216 - 7/3 * s), tol = tol))
+    }
+}
+"g3.fun.vectorized" <-
+function (b, nmax = 50, tol = 1e-10, give = FALSE) 
+{
+    if (is.vector(b)) {
+        p1 <- rep(1, length(b))
+        tau <- b
+    }
+    else {
+        p1 <- b[, 1]
+        tau <- b[, 2]/p1
+    }
+    if (any(Im(tau) < 0)) {
+        stop("Im(tau)<0")
+    }
+    q2 <- exp(pi * (0+2i) * tau)
+    jj <- 1:nmax
+    out <- outer(jj, q2, function(n, q) {
+        n^5 * q^n/(1 - q^n)
+    })
+    if (give) {
+        out <- apply(out, 2, cumsum)
+    }
+    else {
+        out <- apply(out, 2, sum)
+    }
+    return(massage((pi/p1)^6 * (1/216 - 7/3 * out), tol = tol))
+}
+"g.fun" <-
+function (b, ...) 
+{
+    c(g2 = g2.fun(b, ...), g3 = g3.fun(b, ...))
+}
+"H" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v = pi * u/(2 * K)
+    return(theta1(v, m = m, ...))
+}
+"H1" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v = pi * u/(2 * K)
+    return(theta2(v, m = m, ...))
+}
+"half.periods" <-
+function (ignore = NULL, e = NULL, g = NULL, primitive = TRUE) 
+{
+    if (!xor(is.null(e), is.null(g))) {
+        stop("supply exactly one of e, g")
+    }
+    if (is.null(e)) {
+        e <- e1e2e3(g)
+    }
+    omega1 <- K.fun((e[2] - e[3])/(e[1] - e[3]))/sqrti(e[1] - 
+        e[3])
+    omega2 <- (1i)/sqrti(e[1] - e[3]) * K.fun(1 - (e[2] - e[3])/(e[1] - 
+        e[3]))
+    if (primitive) {
+        return(as.primitive(c(omega1, omega2)))
+    }
+    else {
+        return(c(omega1, omega2))
+    }
+}
+"Im<-" <-
+function (x, value) 
+{
+    if (is.complex(value)) {
+        stop("RHS must be pure real")
+    }
+    if (all(value == 0)) {
+        return(Re(x))
+    }
+    else {
+        return(Re(x) + (1i) * value)
+    }
+}
+"integrate.contour" <-
+function (f, u, udash, ...) 
+{
+    myintegrate(function(x, ...) {
+        f(u(x), ...) * udash(x)
+    }, lower = 0, upper = 1, ...)
+}
+"integrate.segments" <-
+function (f, points, close = TRUE, ...) 
+{
+    if (isTRUE(close)) {
+        points <- c(points, points[1])
+    }
+    out <- 0
+    for (i in 1:(length(points) - 1)) {
+        u <- function(z) {
+            points[i] + (points[i + 1] - points[i]) * z
+        }
+        udash <- function(z) {
+            points[i + 1] - points[i]
+        }
+        out <- out + integrate.contour(f, u, udash, ...)
+    }
+    return(out)
+}
+"is.primitive" <-
+function (p, n = 3, tol = 1e-05) 
+{
+    all(abs(p - as.primitive(p, n = n, tol = tol)) < tol)
+}
+"J" <-
+function (tau, use.theta = TRUE, ...) 
+{
+    if (use.theta) {
+        q <- exp(pi * (1i) * tau)
+        jj.2 <- theta2(z = 0, q = q, ...)
+        jj.3 <- theta3(z = 0, q = q, ...)
+        jj.4 <- theta4(z = 0, q = q, ...)
+        return((jj.2^8 + jj.3^8 + jj.4^8)^3/(jj.2 * jj.3 * jj.4)^8/54)
+    }
+    else {
+        return(1/(1 - 27 * g3.fun(tau, ...)^2/g2.fun(tau, ...)^3))
+    }
+}
+"K.fun" <-
+function (m, strict = TRUE, maxiter = 7) 
+{
+    a.old <- 1
+    b.old <- sqrti(1 - m)
+    for (i in 1:maxiter) {
+        a.new <- 0.5 * (a.old + b.old)
+        b.new <- sqrti(a.old * b.old)
+        if (near.match(a.new, a.old)) {
+            return(pi/(2 * a.new))
+        }
+        a.old <- a.new
+        b.old <- b.new
+    }
+    if (strict) {
+        stop("iteration not stable")
+    }
+    else {
+        warning("iteration not stable: partial result returned")
+        return(pi/(2 * a.new))
+    }
+}
+"lambda" <-
+function (tau, ...) 
+{
+    q <- exp(pi * (1i) * tau)
+    (theta2(z = 0, q = q, ...)/theta3(z = 0, q = q, ...))^4
+}
+"latplot" <-
+function (p, n = 10, do.lines = TRUE, ...) 
+{
+    p1 <- p[1]
+    p2 <- p[2]
+    plot(lattice(p, n), xaxt = "n", yaxt = "n", bty = "n", pch = 16, 
+        ...)
+    axis(1, pos = 0, lwd = 2)
+    axis(2, pos = 0, lwd = 2)
+    slope1 <- Im(p1)/Re(p1)
+    slope2 <- Im(p2)/Re(p2)
+    int1 <- Im(p2) - Re(p2) * (slope1)
+    int2 <- Im(p1) - Re(p1) * (slope2)
+    if (do.lines) {
+        for (u in -n:n) {
+            if (is.finite(slope1)) {
+                abline(u * int1, slope1, col = "gray")
+            }
+            else {
+                abline(v = u * Re(p2), col = "gray")
+            }
+            if (is.finite(slope2)) {
+                abline(u * int2, slope2, col = "gray")
+            }
+            else {
+                abline(v = u * Re(p1), col = "gray")
+            }
+        }
+    }
+    points(Re(p1), Im(p1), pch = 16, cex = 3, col = "red")
+    points(Re(p2), Im(p2), pch = 16, cex = 3, col = "green")
+}
+"lattice" <-
+function (p, n) 
+{
+    outer(p[1] * (-n:n), p[2] * (-n:n), "+")
+}
+"lemniscatic" <-
+function (...) 
+{
+    omega1 <- gamma(1/4)^2/(4 * sqrt(pi))
+    omega2 <- (1i) * omega1
+    Omega <- c(omega1, omega2)
+    e <- c(1/2, 0, -1/2)
+    names(e) <- c("e1", "e2", "e3")
+    jj <- pi/4/omega1
+    Eta <- c(jj, -jj * (1i), jj * (1i - 1))
+    out <- list(Omega = Omega, q = exp(pi * (1i) * omega2/omega1), 
+        e = e, g = c(g2 = 1, g3 = 0), Delta = 1, Eta = Eta, is.AnS = TRUE, 
+        given = "d")
+    class(out) <- "parameters"
+    return(out)
+}
+"limit" <-
+function (x, upper = quantile(Re(x), 0.99, na.rm = TRUE), lower = quantile(Re(x), 
+    0.01, na.rm = TRUE), na = FALSE) 
+{
+    if (is.complex(x)) {
+        return(Recall(Re(x), upper = upper, lower = lower, na = na) + 
+            (1i) * Recall(Im(x), upper = upper, lower = lower, 
+                na = na))
+    }
+    if (na) {
+        x[x < lower] <- NA
+        x[x > upper] <- NA
+    }
+    else {
+        x <- pmax(x, lower)
+        x <- pmin(x, upper)
+    }
+    return(x)
+}
+"massage" <-
+function (z, tol = 1e-10) 
+{
+    if (length(z) == 1) {
+        if (abs(Im(z)) < tol) {
+            return(Re(z))
+        }
+        else {
+            if (abs(Re(z)) < tol) {
+                return((1i) * Im(z))
+            }
+            else {
+                return(z)
+            }
+        }
+    }
+    Im(z[abs(Im(z)) < tol]) <- 0
+    Re(z[abs(Re(z)) < tol]) <- 0
+    if (all(Im(z) == 0)) {
+        z <- Re(z)
+    }
+    return(z)
+}
+"mn" <-
+function (z, p) 
+{
+    p1 <- p[1]
+    p2 <- p[2]
+    m <- (Re(z) * Im(p2) - Im(z) * Re(p2))/(Re(p1) * Im(p2) - 
+        Im(p1) * Re(p2))
+    n <- (Re(z) * Im(p1) - Im(z) * Re(p1))/(Re(p2) * Im(p1) - 
+        Im(p2) * Re(p1))
+    cbind(m, n)
+}
+"mob" <-
+function (M, x) 
+{
+    (M[1] * x + M[3])/(M[2] * x + M[4])
+}
+"%mob%" <-
+function (M, x) 
+{
+    mob(M, x)
+}
+"mobius" <-
+function (n) 
+{
+    if (length(n) > 1) {
+        return(sapply(n, mobius))
+    }
+    if (n == 1) {
+        return(1)
+    }
+    jj <- table(factorize(n))
+    if (any(jj > 1)) {
+        return(as.integer(0))
+    }
+    else {
+        return(as.integer((-1)^length(jj)))
+    }
+}
+"myintegrate" <-
+function (f, lower, upper, ...) 
+{
+    f.real <- function(x, ...) {
+        Re(f(x, ...))
+    }
+    f.imag <- function(x, ...) {
+        Im(f(x, ...))
+    }
+    jj.1 <- integrate(f.real, lower = lower, upper = upper, ...)
+    jj.2 <- integrate(f.imag, lower = lower, upper = upper, ...)
+    jj.1$value + (1i) * jj.2$value
+}
+"nc" <-
+function (u, m, ...) 
+{
+    theta.n(u, m = m, ...)/theta.c(u, m = m, ...)
+}
+"nd" <-
+function (u, m, ...) 
+{
+    theta.n(u, m = m, ...)/theta.d(u, m = m, ...)
+}
+"near.match" <-
+function (x, y, tol = NULL) 
+{
+    if (is.null(tol)) {
+        tol <- .Machine$double.eps * 2
+    }
+    return(isTRUE(all.equal(x, y, tol = tol)))
+}
+"newton_raphson" <-
+function (initial, f, fdash, maxiter, give=TRUE, tol = .Machine$double.eps) 
+{
+    old.guess <- initial
+    for (i in seq_len(maxiter)) {
+        new.guess <- old.guess - f(old.guess)/fdash(old.guess)
+        jj <- f(new.guess)
+        if(is.na(jj) | is.infinite(jj)){
+          break
+        }
+        if (near.match(new.guess, old.guess) | abs(jj) < tol) {
+          if(give){
+            return(list(root=new.guess,
+                        f.root=jj,
+                        iter=i))
+          } else {
+            return(new.guess)
+          }
+        }
+        old.guess <- new.guess
+    }
+    stop("did not converge")
+}
+
+"nn" <-
+function (u, m, ...) 
+{
+    theta.n(u, m = m, ...)/theta.n(u, m = m, ...)
+}
+"nome" <-
+function (m) 
+{
+    K <- K.fun(m)
+    Kdash <- K.fun(1 - m)
+    return((exp(-pi * Kdash/K)))
+}
+"nome.k" <-
+function (k) 
+{
+    K <- K.fun(m = sqrt(k))
+    Kdash <- K.fun(sqrt(1 - k^2))
+    return((exp(-pi * Kdash/K)))
+}
+"ns" <-
+function (u, m, ...) 
+{
+    theta.n(u, m = m, ...)/theta.s(u, m = m, ...)
+}
+"P" <-
+function (z, g = NULL, Omega = NULL, params = NULL, use.fpp = TRUE, 
+    give.all.3 = FALSE, ...) 
+{
+    if (is.null(params)) {
+        params <- parameters(g = g, Omega = Omega)
+    }
+    if (use.fpp) {
+        z <- fpp(z, p = 2 * params$Omega)
+    }
+    e <- params$e
+    q <- params$q
+    omega <- params$Omega[1]
+    v <- pi * z/(2 * omega)
+    out1 <- e[1] + pi^2/(4 * omega^2) * (theta1.dash.zero.q(q) * 
+        theta2(v, q = q, ...)/theta2(0, q = q, ...)/theta1(v, 
+        q = q, ...))^2
+    if (give.all.3) {
+        out2 <- e[2] + pi^2/(4 * omega^2) * (theta1.dash.zero.q(q) * 
+            theta3(v, q = q, ...)/theta3(0, q = q, ...)/theta1(v, 
+            q = q, ...))^2
+        out3 <- e[3] + pi^2/(4 * omega^2) * (theta1.dash.zero.q(q) * 
+            theta4(v, q = q, ...)/theta4(0, q = q, ...)/theta1(v, 
+            q = q, ...))^2
+        return(drop(cbind(out1, out2, out3)))
+    }
+    else {
+        attributes(out1) <- attributes(z)
+        return(out1)
+    }
+}
+"p1.tau" <-
+function (b) 
+{
+    if (length(b) == 2) {
+        p1 <- b[1]
+        tau <- b[2]/b[1]
+    }
+    else {
+        if (identical(ncol(b), 2)) {
+            p1 <- b[1, ]
+            tau <- b[2, ]/b[1, ]
+        }
+        else {
+            p1 <- 1
+            tau <- b
+        }
+    }
+    if (any(Im(tau) < 0)) {
+        warning("g2 and g3 not defined where Im(p2/p1)")
+    }
+    return(list(p1 = p1, tau = tau))
+}
+"parameters" <-
+function (Omega = NULL, g = NULL, description = NULL) 
+{
+    jj <- c(!is.null(Omega), !is.null(g), !is.null(description))
+    if (sum(jj) != 1) {
+        stop("function must be supplied with exactly one argument")
+    }
+    if (!is.null(description)) {
+        return(switch(description, equianharmonic = equianharmonic(), 
+            lemniscatic = lemniscatic(), pseudolemniscatic = pseudolemniscatic(), 
+            ))
+    }
+    if (is.null(Omega)) {
+        given <- "g"
+        g2 <- g[1]
+        g3 <- g[2]
+        Omega <- half.periods(g = g)
+    }
+    else {
+        given <- "o"
+        if (!is.primitive(Omega)) {
+            warning("Omega supplied not a primitive pair of half periods.  Function converting Omega to a primitive pair ")
+            Omega <- as.primitive(Omega)
+        }
+        g <- g.fun(Omega)
+        g2 <- g[1]
+        g3 <- g[2]
+    }
+    e <- e1e2e3(g, Omega = Omega)
+    Delta <- g2^3 - 27 * g3^2
+    omega1 <- Omega[1]
+    omega2 <- Omega[2]
+    omega3 <- -omega1 - omega2
+    p1 <- 2 * omega1
+    p2 <- 2 * omega2
+    jj.q <- exp(pi * (1i) * omega2/omega1)
+    eta1 <- -pi^2 * theta1dashdashdash(0, q = jj.q)/(12 * omega1 * 
+        theta1.dash.zero.q(q = jj.q))
+    eta2 <- omega2/omega1 * eta1 - pi * (1i)/(2 * omega1)
+    eta3 <- -eta2 - eta1
+    Eta <- c(eta1, eta2, eta3)
+    out <- list(Omega = Omega, q = exp(pi * (1i) * (omega2)/omega1), 
+        e = e, g = g, Delta = Delta, Eta = Eta, 
+        is.AnS = FALSE, given = given)
+    class(out) <- "parameters"
+    return(out)
+}
+"Pdash" <-
+function (z, g = NULL, Omega = NULL, params = NULL, use.fpp = TRUE, 
+    ...) 
+{
+    if (is.null(params)) {
+        params <- parameters(g = g, Omega = Omega)
+    }
+    if (use.fpp) {
+        z <- fpp(z, p = 2 * params$Omega)
+    }
+    q <- params$q
+    omega <- params$Omega[1]
+    v <- pi * z/(2 * omega)
+    out <- -pi^3/(4 * omega^3) * theta2(v, q = q, ...) * theta3(v, 
+        q = q, ...) * theta4(v, q = q, ...) * theta1dash(0, q = q, 
+        ...)^3/(theta2(0, q = q, ...) * theta3(0, q = q, ...) * 
+        theta4(0, q = q, ...) * theta1(v, q = q, ...)^3)
+    return(out)
+}
+"Pdash.laurent" <-
+function (z, g = NULL, nmax = 80) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    ckn <- ck(g = c(g2, g3), n = nmax)
+    psum <- z * 0
+    z.squared <- z * z
+    zz <- z
+    for (k in 2:nmax) {
+        psum.new <- psum + zz * ckn[k] * (2 * k - 2)
+        if (near.match(psum, psum.new) & ckn[k] > 0) {
+            return(-2/z^3 + psum)
+        }
+        psum <- psum.new
+        zz <- zz * z.squared
+    }
+    warning("series not converged.  See p636 for radius of convergence")
+    return(-2/z^3 + psum)
+}
+"P.laurent" <-
+function (z, g = NULL, tol = 0, nmax = 80) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    ckn <- ck(g = c(g2, g3), n = nmax)
+    psum <- z * 0
+    z.squared <- z * z
+    zz <- z.squared
+    for (k in 2:nmax) {
+        psum.new <- psum + zz * ckn[k]
+        if (near.match(psum, psum.new, tol = tol) & abs(ckn[k]) > 
+            0) {
+            return(1/z^2 + psum)
+        }
+        psum <- psum.new
+        zz <- zz * z.squared
+    }
+    warning("series not converged; partial sum returned.  See p636 for radius of convergence")
+    return(1/z^2 + psum)
+}
+"P.pari" <-
+function (z, Omega, pari.fun = "ellwp", numerical = TRUE) 
+{
+    attr <- attributes(z)
+    z <- as.vector(z)
+    a <- cbind(z, Omega[1], Omega[2])
+    out <- NULL
+    pari.complex <- function(x) {
+        gsub("i", "*I", x)
+    }
+    for (i in 1:nrow(a)) {
+        string <- paste("echo '", pari.fun, "([", pari.complex(2 * 
+            a[i, 2]), ",", pari.complex(2 * a[i, 3]), "],", pari.complex(a[i, 
+            1]), ")' | gp -q")
+        jj <- gsub(" ", "", sub("\\*I", "i", system(string, intern = TRUE)))
+        if (numerical) {
+            jj <- as.complex(jj)
+        }
+        out <- c(out, jj)
+    }
+    attributes(out) <- attr
+    return(out)
+}
+"primes" <-
+function (n) 
+{
+    if ((M2 <- max(n)) <= 1) 
+        return(numeric(0))
+    x <- 1:M2
+    x[1] <- 0
+    p <- 1
+    M <- floor(sqrt(M2))
+    while ((p <- p + 1) <= M) if (x[p] != 0) 
+        x[seq(p^2, n, p)] <- 0
+    as.integer(x[x > 0])
+}
+"pseudolemniscatic" <-
+function (...) 
+{
+    jj <- gamma(1/4)^2/(4 * sqrt(2 * pi))
+    Omega <- c(jj * (1 - (1i)), jj * (1 + (1i)))
+    e <- c(1/2, 0, -1/2) * (1i)
+    names(e) <- c("e1", "e2", "e3")
+    jj <- pi/4/Omega[1]
+    Eta <- c(jj, -jj * (1i), jj * (1i - 1))
+    out <- list(Omega = Omega, q = exp(pi * (1i) * Omega[2]/Omega[1]), 
+        e = e, g = c(g2 = -1, g3 = 0), Delta = 1, Eta = Eta, 
+        is.AnS = TRUE, given = "d")
+    class(out) <- "parameters"
+    return(out)
+}
+"residue" <- function(f, z0, r, O=z0, ...){
+    if(r <= abs(z0-O)){
+        warning("contour does not wrap round z0.  Either increase r or move O closer to z0")
+    }
+
+    if(is.complex(r)){
+        warning('imaginary part of r discarded')
+        r <- Re(r)
+    }
+        
+    u <- function(x){O+r*exp(pi*2i*x)} # 0 <= x <= 1 
+    udash <- function(x){r*pi*2i*exp(pi*2i*x)}
+    integrate.contour(function(z,...){f(z,...)/(z-z0)},u,udash,...)/(pi*2i)
+}
+"Re<-" <-
+function (x, value) 
+{
+    if (is.complex(value)) {
+        stop("RHS must be pure real")
+    }
+    return((1i) * Im(x) + value)
+}
+"sc" <-
+function (u, m, ...) 
+{
+    theta.s(u, m = m, ...)/theta.c(u, m = m, ...)
+}
+"sd" <-
+function (u, m, ...) 
+{
+    theta.s(u, m = m, ...)/theta.d(u, m = m, ...)
+}
+"sigma" <-
+function (z, g = NULL, Omega = NULL, params = NULL, use.theta = TRUE, 
+    ...) 
+{
+    if (is.null(params)) {
+        params <- parameters(g = g, Omega = Omega)
+    }
+    Omega <- params$Omega
+    Eta <- params$Eta
+    if (use.theta) {
+        o <- Omega[1]
+        q <- exp(pi * (1i) * Omega[2]/Omega[1])
+        return(2 * o/pi * exp(Eta[1] * z^2/2/o) * theta1(pi * 
+            z/2/o, q = q, ...)/theta1dash(0, q = q, ...))
+    }
+    jj <- fpp(z, 2 * Omega, give = TRUE)
+    z <- jj$answer
+    M <- jj$mn[, 1]
+    N <- jj$mn[, 2]
+    (-1)^(M + N + M * N) * Recall(z, params = params, use.theta = TRUE, 
+        ...) * exp((z + M * Omega[1] + N * Omega[2]) * (2 * M * 
+        Eta[1] + 2 * N * Eta[2]))
+}
+"sigmadash.laurent" <-
+function (z, g = NULL, nmax = 8, give.error = FALSE) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    attr <- attributes(z)
+    z <- as.vector(z)
+    jj <- amn(nmax)
+    if (give.error) {
+        minor.diag <- row(jj) == 1:nmax & col(jj) == nmax:1
+        jj[!minor.diag] <- 0
+    }
+    m <- col(jj) - 1
+    n <- row(jj) - 1
+    non.z <- as.vector(jj * (g2/2)^m * (2 * g3)^n/factorial(4 * 
+        m + 6 * n))
+    power.z <- as.vector(4 * m + 6 * n)
+    out <- outer(z, power.z, "^")
+    out <- sweep(out, 2, non.z, "*")
+    if (give.error) {
+        out <- apply(out, 1, function(x) {
+            sum(abs(x))
+        })
+    }
+    else {
+        out <- apply(out, 1, sum)
+    }
+    attributes(out) <- attr
+    return(out)
+}
+"sigma.laurent" <-
+function (z, g = NULL, nmax = 8, give.error = FALSE) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    attr <- attributes(z)
+    z <- as.vector(z)
+    jj <- amn(nmax)
+    if (give.error) {
+        minor.diag <- row(jj) == 1:nmax & col(jj) == nmax:1
+        jj[!minor.diag] <- 0
+    }
+    m <- col(jj) - 1
+    n <- row(jj) - 1
+    non.z <- as.vector(jj * (g2/2)^m * (2 * g3)^n/factorial(4 * 
+        m + 6 * n + 1))
+    power.z <- as.vector(4 * m + 6 * n + 1)
+    out <- outer(z, power.z, "^")
+    out <- sweep(out, 2, non.z, "*")
+    if (give.error) {
+        out <- apply(out, 1, function(x) {
+            sum(abs(x))
+        })
+    }
+    else {
+        out <- apply(out, 1, sum)
+    }
+    attributes(out) <- attr
+    return(out)
+}
+"sn" <-
+function (u, m, ...) 
+{
+    theta.s(u, m = m, ...)/theta.n(u, m = m, ...)
+}
+"sqrti" <-
+function (x) 
+{
+    if (is.complex(x)) {
+        return(sqrt(x))
+    }
+    if (any(x < 0)) {
+        return(sqrt(x + 0i))
+    }
+    else {
+        return(sqrt(x))
+    }
+}
+"ss" <-
+function (u, m, ...) 
+{
+    1
+}
+"Theta" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v = pi * u/(2 * K)
+    return(theta4(v, m = m, ...))
+}
+"theta.00" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + q^(n^2) * cos(2 * z * n)
+        if (near.match(out, out.new)) {
+            ans <- 1 + 2 * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta.01" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + (-1)^n * q^(n^2) * cos(2 * z * n)
+        if (near.match(out, out.new)) {
+            ans <- 1 + 2 * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta1" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + (-1)^n * q^(n * (n + 1)) * sin((2 * 
+            n + 1) * z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"Theta1" <-
+function (u, m, ...) 
+{
+    K <- K.fun(m)
+    v = pi * u/(2 * K)
+    return(theta3(v, m = m, ...))
+}
+"theta.10" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + q^(n * (n + 1)) * cos((2 * n + 1) * 
+            z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta.11" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + (-1)^n * q^(n * (n + 1)) * sin((2 * 
+            n + 1) * z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta1dash" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + (-1)^n * q^(n * (n + 1)) * (2 * n + 
+            1) * cos((2 * n + 1) * z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta1dashdash" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out - (-1)^n * q^(n * (n + 1)) * (2 * n + 
+            1)^2 * sin((2 * n + 1) * z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta1dashdashdash" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out - (-1)^n * q^(n * (n + 1)) * (2 * n + 
+            1)^3 * cos((2 * n + 1) * z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta1.dash.zero" <-
+function (m, ...) 
+{
+    theta2(0, m = m, ...) * theta3(0, m = m, ...) * theta4(0, 
+        m = m, ...)
+}
+"theta1.dash.zero.q" <-
+function (q, ...) 
+{
+    theta2(0, q = q, ...) * theta3(0, q = q, ...) * theta4(0, 
+        q = q, ...)
+}
+"theta2" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 0:maxiter) {
+        out.new <- out + q^(n * (n + 1)) * cos((2 * n + 1) * 
+            z)
+        if (near.match(out, out.new)) {
+            ans <- 2 * q^(1/4) * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta3" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + q^(n^2) * cos(2 * z * n)
+        if (near.match(out, out.new)) {
+            ans <- 1 + 2 * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta4" <-
+function (z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, 
+    maxiter = 30) 
+{
+    if (!xor(is.null(m), is.null(q))) {
+        stop("supply exactly one of m, q")
+    }
+    if (is.null(q)) {
+        q <- nome(m)
+    }
+    out <- 0
+    for (n in 1:maxiter) {
+        out.new <- out + (-1)^n * q^(n^2) * cos(2 * z * n)
+        if (near.match(out, out.new)) {
+            ans <- 1 + 2 * out
+            if (give.n) {
+                return(list(iterations = n, ans = ans))
+            }
+            else {
+                return(ans)
+            }
+        }
+        out <- out.new
+    }
+    stop("maximum iterations reached")
+}
+"theta.c" <-
+function (u, m, method = "16.36.6", ...) 
+{
+    switch(method, "16.36.6" = e16.36.6b(u, m, ...), "16.36.6b" = e16.36.6b(u, 
+        m, ...), "16.36.1b" = e16.36.6b(u, m, ...), "16.37.2" = e16.37.2(u, 
+        m, ...), "16.38.2" = e16.38.2(u, m, ...), stop("method not recognized"))
+}
+"theta.d" <-
+function (u, m, method = "16.36.7", ...) 
+{
+    switch(method, "16.36.7" = e16.36.7a(u, m, ...), "16.36.7a" = e16.36.7a(u, 
+        m, ...), "16.36.2a" = e16.36.7a(u, m, ...), "16.37.3" = e16.37.3(u, 
+        m, ...), "16.38.3" = e16.38.3(u, m, ...), stop("method not recognized"))
+}
+"theta.n" <-
+function (u, m, method = "16.36.7", ...) 
+{
+    switch(method, "16.36.7" = e16.36.7b(u, m, ...), "16.36.7b" = e16.36.7b(u, 
+        m, ...), "16.36.2b" = e16.36.7b(u, m, ...), "16.37.4" = e16.37.4(u, 
+        m, ...), "16.38.4" = e16.38.4(u, m, ...), stop("method not recognized"))
+}
+"theta.s" <-
+function (u, m, method = "16.36.6", ...) 
+{
+    switch(method, "16.36.6" = e16.36.6a(u, m, ...), "16.36.6a" = e16.36.6a(u, 
+        m, ...), "16.36.1a" = e16.36.6a(u, m, ...), "16.37.1" = e16.37.1(u, 
+        m, ...), "16.38.1" = e16.38.1(u, m, ...), stop("method not recognized"))
+}
+"totient" <-
+function (n) 
+{
+    if (length(n) > 1) {
+        return(sapply(n, match.fun(sys.call()[[1]])))
+    }
+    as.integer(n * prod(1 - 1/unique(factorize(n))))
+}
+"unimodular" <-
+function (n) 
+{
+    if (n == 1) {
+        return(array(diag(2), c(2, 2, 1)))
+    }
+    out <- do.call("cbind", sapply(0:n, farey))
+    out <- unique(out, MARGIN = 2)
+    dim(out) <- c(2, 2, length(out)/4)
+    return(out[2:1, , ])
+}
+"unimodularity" <-
+function (n, o, FUN, ...) 
+{
+    u <- unimodular(n)
+    N <- dim(u)[3]
+    out <- rep(0, N)
+    for (i in 1:N) {
+        out[i] <- FUN(drop(u[, , i] %*% o), ...)
+    }
+    return(out)
+}
+"view" <-
+function (x, y, z, scheme = 0, real.contour = TRUE, imag.contour = real.contour, 
+    default = 0, col = "black", r0 = 1, power = 1, show.scheme = FALSE, 
+    ...) 
+{
+    if (is.numeric(scheme)) {
+        f <- switch(as.character(scheme), "0" = function(z) {
+            u <- 2/pi * atan(Mod(z)/r0)
+            s0 <- Re(u * 0 + 1)
+            s0[u > 0.5] <- (2 * (1 - u[u > 0.5]))^power
+            v0 <- Re(u * 0 + 1)
+            v0[u < 0.5] <- (2 * u[u < 0.5])^power
+            return(hsv(h = scale(Arg(z)), s = s0, v = v0))
+        }, "1" = function(z) {
+            hsv(h = scale(Arg(z)), s = scale(Im(z)), v = 1)
+        }, "2" = function(z) {
+            hsv(h = g(Arg(z)), s = scale(abs(z)), v = 1)
+        }, "3" = function(z) {
+            hsv(h = scale(Re(z)), s = 1, v = scale(Mod(z))^power)
+        }, "4" = function(z) {
+            hsv(h = 0.4, s = 1, v = scale(Arg(z))^power)
+        }, "5" = function(z) {
+            hsv(h = 0.4, s = 0, v = 0.5 + 0.5 * (Im(z) > 0))
+        }, "6" = function(z) {
+            hsv(h = 0.4, s = 1, v = 0.5 + 0.5 * (Im(z) > 0))
+        }, "7" = function(z) {
+            hsv(h = scale(Re(z))^power, s = 1, v = scale(Mod(z))^power)
+        }, "8" = function(z) {
+            hsv(h = wrap(Arg(z)))
+        }, "9" = function(z) {
+            hsv(h = wrap(Arg(z)), v = scale(Mod(z))^power)
+        }, "10" = function(z) {
+            hsv(h = wrap(Arg(z)), v = scale(exp(-Mod(z))))
+        }, "11" = function(z) {
+            hsv(h = wrap(Arg(z)), s = scale(Mod(z))^power)
+        }, "12" = function(z) {
+            hsv(h = wrap(Arg(z)), s = scale(exp(-Mod(z))))
+        }, "13" = function(z) {
+            hsv(h = 0.3, s = 1, v = (floor(Re(z)) + floor(Im(z)))%%2)
+        }, "14" = function(z) {
+            hsv(h = wrap(Arg(z)), s = 1, v = (floor(Re(z)) + 
+                floor(Im(z)))%%2)
+        }, "15" = function(z) {
+            hsv(h = wrap(Arg(z)), s = 1, v = 0.4 + 0.4 * (floor(Re(z)) + 
+                floor(Im(z)))%%2)
+        }, "16" = function(z) {
+            hcl(h = 360 * wrap(Arg(z)), l = 100 * scale(Mod(z))^power)
+        }, "17" = function(z) {
+            hcl(h = 360 * wrap(Arg(z)), c = 100 * scale(Mod(z))^power)
+        }, "18" = function(z) {
+            rgb(red = scale(Re(z)), green = 1 - scale(Re(z))^power, 
+                blue = scale(Im(z))^power)
+        }, "19" = function(z) {
+            rgb(red = scale(Re(z)), green = scale(Im(z))^power, blue = 0)
+        }, function(z) {
+            hsv(s = 0, v = 1)
+        })
+    }
+    else {
+        f <- scheme
+        environment(f) <- environment()
+    }
+    if (show.scheme) {
+        return(f)
+    }
+    jj <- z
+    jj[] <- (1:length(z))/length(z)
+    jj <- Re(jj)
+    breakup <- function(x) {
+        ifelse(x > 1/2, 3/2 - x, 1/2 - x)
+    }
+    g <- function(x) {
+        0.5 + atan(x)/pi
+    }
+    scale <- function(x) {
+        (x - min(x))/(max(x) - min(x))
+    }
+    wrap <- function(x) {
+        1/2 + x/(2 * pi)
+    }
+    if (!is.na(default)) {
+        z[is.na(z)] <- default
+        z[is.infinite(z)] <- default
+    }
+    suppressWarnings(image(x, y, z = jj, col = f(z), asp = 1, ...))
+    if (real.contour) {
+      suppressWarnings(contour(x, y, Re(z), add = TRUE, lty = 1, col = col, 
+                               ...))
+    }
+    if (imag.contour) {
+      suppressWarnings(contour(x, y, Im(z), add = TRUE, lty = 2, col = col, ...))
+    }
+}
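+# Illustrative (not run) call to view(), assuming a square grid:
+#   x <- seq(from = -2, to = 2, len = 200)
+#   z <- outer(x, 1i * x, "+")
+#   view(x, x, z^2, scheme = 0)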
+"zeta" <-
+function (z, g = NULL, Omega = NULL, params = NULL, use.fpp = TRUE, 
+    ...) 
+{
+    if (is.null(params)) {
+        params <- parameters(g = g, Omega = Omega)
+    }
+    Omega <- params$Omega
+    Eta <- params$Eta
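+    # Reduce z modulo the period lattice using fpp(), then apply the
+    # quasi-periodicity relation
+    # zeta(z + 2*m*omega1 + 2*n*omega2) = zeta(z) + 2*m*eta1 + 2*n*eta2.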
+    if (use.fpp) {
+        jj <- fpp(z, 2 * Omega, give = TRUE)
+        z <- jj$answer
+        M <- jj$mn[, 1]
+        N <- jj$mn[, 2]
+        return(Recall(z, params = params, use.fpp = FALSE, ...) + 
+            2 * M * Eta[1] + 2 * N * Eta[2])
+    }
+    else {
+        o <- Omega[1]
+        q <- exp(pi * (1i) * Omega[2]/Omega[1])
+        jj <- pi * z/(2 * o)
+        return(z * Eta[1]/o + pi * theta1dash(jj, q = q, ...)/(2 * 
+            o * theta1(jj, q = q, ...)))
+    }
+}
+"zeta.laurent" <-
+function (z, g = NULL, nmax = 80) 
+{
+    g2 <- g[1]
+    g3 <- g[2]
+    ckn <- ck(g = c(g2, g3), n = nmax)
+    psum <- z * 0
+    z.squared <- z * z
+    zz <- z * z.squared
+    for (k in 2:nmax) {
+        psum.new <- psum + zz * ckn[k]/(2 * k - 1)
+        if (near.match(psum, psum.new) & abs(ckn[k]) > 0) {
+            return(1/z - psum)
+        }
+        psum <- psum.new
+        zz <- zz * z.squared
+    }
+    warning("series not converged.  See p636 for radius of convergence")
+    return(1/z - psum)
+}
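+# Illustrative check (not run): for |z| well within the radius of
+# convergence, zeta.laurent() should agree closely with zeta(), e.g.
+#   zeta.laurent(0.1 + 0.1i, g = c(1, 2))  versus  zeta(0.1 + 0.1i, g = c(1, 2))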
+
+# The following code for sigma1, sigma2 and sigma3 is commented out
+# because its output does not agree with Mathematica.  I'll investigate
+# before adding these functions to the package. rksh.
+
+#sigma1 <- function(u,params){
+#  o1 <- params$Omega[1]
+#  eta1 <- params$Eta[1]
+#exp(-eta1*u)*
+#  sigma(o1+u,params=params)/
+#    sigma(o1,params=params)
+#}
+#
+#sigma2 <- function(u,params){
+#  o2 <-  -sum(params$Omega)
+#  eta2 <- params$Eta[3]
+#exp(-eta2*u)*
+#  sigma(o2+u,params=params)/
+#    sigma(o2,params=params)
+#}
+#
+#sigma3 <- function(u,params){
+#  o3 <- params$Omega[2]
+#  eta3 <- params$Eta[2]
+#  exp(-eta3*u)*
+#    sigma(o3+u,params=params)/
+#      sigma(o3,params=params)
+#}
+#
+#
+
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..e6e9e73
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/demo/00Index b/demo/00Index
new file mode 100644
index 0000000..ae8fe1d
--- /dev/null
+++ b/demo/00Index
@@ -0,0 +1 @@
+elliptic   Some demos for the elliptic package
diff --git a/demo/elliptic.R b/demo/elliptic.R
new file mode 100644
index 0000000..e93992c
--- /dev/null
+++ b/demo/elliptic.R
@@ -0,0 +1,87 @@
+# The following plots and demos illustrate various functionality of the package.
+# Some of the parameters have been altered in the interest of speed.
+
+
+# Change 'n' to higher values to see higher resolution plots.
+n <- 200
+
+# Change 'val' to see the functions evaluated over a larger complex area
+val <- 4
+
+# Change 'no.of.schemes' to see more schemes used
+no.of.schemes <- 3
+
+# Change 'fact' to adjust the factor used for the upper-half-plane functions
+fact <- 3
+
+
+# Remember the settings:
+opar <- par(ask = dev.interactive(orNone = TRUE))
+
+
+
+# Define a complex grid:
+x <- seq(from = -val, to=val, len=n)
+y <- x
+z <- outer(x,1i*x,"+")
+
+# Now a grid for the upper half-plane functions:
+xupper <- x/fact
+yupper <- (y+val+val/n)/fact
+zupper <- outer(xupper,1i*yupper,"+")
+
+
+# A little wrapper for view():
+f <- function(...){
+  view(... , axes=FALSE,xlab="",ylab="")
+  jj <- c(-val,-val/2,0,val/2,val)
+  axis(1,pos=-val,at=jj)
+  axis(2,pos=-val,at=jj)
+}
+
+# A wrapper for view() for the upper half-plane functions:
+fupper <- function(...){
+  view(... , axes=FALSE,xlab="",ylab="")
+  jj <- c(-val,val)/fact
+  jj2 <- c(0,val*2)/fact
+  axis(1,pos=0,at=pretty(jj))
+  axis(2,pos=-val/fact,at=pretty(jj2))
+}
+
+# Tiny function for the title:
+fish <- function(string,i){
+  paste(string, ". scheme=", as.character(i),"; try increasing n",sep="")
+}
+
+# Wrapper to run view() a few times with differing args:
+jj <- function(fz,string){
+  for(i in sample(19,no.of.schemes)){
+    f(x,y,fz,scheme=i,real=FALSE,main=fish(string,i))
+  }
+  f(x,y,fz,scheme=-1,real=TRUE,imag=FALSE,nlevels=33,drawlabels=FALSE,main=fish(string,-1))
+  f(x,y,fz,scheme=-1,real=TRUE,nlevels=33,drawlabels=FALSE,main=fish(string,-1))
+}
+
+# Corresponding wrapper for the upper half plane functions:
+kk <- function(fz,string){
+  for(i in sample(19,no.of.schemes)){
+    fupper(xupper,yupper,fz,scheme=i,real=FALSE,main=fish(string,i))
+  }
+  fupper(xupper,yupper,fz,scheme=-1,real=TRUE,imag=FALSE,
+         nlevels=33,drawlabels=FALSE,main=fish(string,-1))
+  fupper(xupper,yupper,fz,scheme=-1,real=TRUE,imag=TRUE,
+         nlevels=33,drawlabels=FALSE,main=fish(string,-1))
+}
+
+# Now run everything; jj() and kk() take some time to complete:
+jj(fz=limit(sn(z,m=1/2+0.6i)),"sn(z)")
+jj(fz=limit(P(z,c(1+2.1i,1.3-3.2i))),"P(z)")
+jj(fz=limit(zeta(z,c(1+1i,2-3i))),"zeta(z)")
+jj(fz=limit(sigma(z,c(10+11i,20-31i))),"sigma(z)")
+kk(fz=limit(J(zupper,maxiter=100)),"J(z)")
+kk(fz=limit(lambda(zupper,maxiter=100)),"lambda(z)")
+
+
+
+# reset old settings:
+par(opar)
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..8045ec5
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,28 @@
+citHeader("To cite in publications use:")
+
+citEntry(entry = "Article",
+	 title = {
+             paste("Introducing elliptic, an R package for elliptic
+	 and modular functions")
+	     },
+	 author = personList(
+         person(first = "Robin",
+		middle = "K. S.",
+		last = "Hankin",
+		email="r.hankin at noc.soton.ac.uk")),
+	 journal = "Journal of Statistical Software",
+         year = "2006",
+	 month = "February",
+	 volume = "15",
+	 issue = "7",
+	 textVersion = {
+             paste("R. K. S. Hankin",
+                   "2006.",
+                   "Introducing elliptic, an R package for elliptic
+	 and modular functions.",
+	     	   "Journal of Statistical Software",
+		   "15(7)"
+		   )
+         })
+
+
diff --git a/inst/doc/ellipticpaper.R b/inst/doc/ellipticpaper.R
new file mode 100644
index 0000000..236e3e4
--- /dev/null
+++ b/inst/doc/ellipticpaper.R
@@ -0,0 +1,765 @@
+### R code from vignette source 'ellipticpaper.Rnw'
+
+###################################################
+### code chunk number 1: requirepackage
+###################################################
+require(elliptic,quietly=TRUE)
+
+
+###################################################
+### code chunk number 2: setOverallImageQuality
+###################################################
+n <- 400
+n_BACCO <- 40
+
+
+###################################################
+### code chunk number 3: require_packages
+###################################################
+
+
+
+###################################################
+### code chunk number 4: ellipticpaper.Rnw:234-237
+###################################################
+require(elliptic)
+require(emulator)
+require(calibrator)
+
+
+###################################################
+### code chunk number 5: simple_usage_of_P
+###################################################
+z <- 1.9+1.8i
+P(z,g=c(1,1i))
+P(z,Omega=c(1,1i))
+
+
+###################################################
+### code chunk number 6: define_maxdiff
+###################################################
+maxdiff <- function(x,y){max(abs(x-y))}
+
+
+###################################################
+### code chunk number 7: laurent
+###################################################
+g <- c(3,2+4i)
+z <- seq(from=1,to=0.4+1i,len=34)
+
+
+###################################################
+### code chunk number 8: maxdiff_laurent
+###################################################
+maxdiff(P(z,g), P.laurent(z,g))
+
+
+###################################################
+### code chunk number 9: abs_e18.10.9
+###################################################
+abs( e18.10.9(parameters(g=g)))
+
+
+###################################################
+### code chunk number 10: lattice_figure
+###################################################
+jj <- parameters(g=c(1+1i,2-3i))$Omega
+latplot(jj,xlim=c(-4,4),ylim=c(-4,4),xlab="Re(z)",
+     ylab="Im(z)")
+polygon(Re(c(jj[1],sum(jj),jj[2],0)),
+        Im(c(jj[1],sum(jj),jj[2],0)),lwd=2,col="gray90",pch=16,cex=3)
+
+kk <- -c(3*jj[1] + 2*jj[2] , jj[1] + jj[2])  # det(matrix(c(3,2,1,1),2,2,TRUE)) == 1
+
+polygon(Re(c(kk[1],sum(kk),kk[2],0)),
+        Im(c(kk[1],sum(kk),kk[2],0)),lwd=2,col="gray30",pch=16,cex=3)
+
+
+###################################################
+### code chunk number 11: congruence
+###################################################
+M <- congruence(c(4,9))
+
+
+###################################################
+### code chunk number 12: define_o
+###################################################
+o <- c(1,1i)
+
+
+###################################################
+### code chunk number 13: maxdiff_o
+###################################################
+maxdiff(g.fun(o), g.fun(M %*% o,maxiter=800))
+
+
+###################################################
+### code chunk number 14: u_udash
+###################################################
+u <- function(x){exp(pi*2i*x)}
+udash <- function(x){pi*2i*exp(pi*2i*x)}
+Zeta <- function(z){zeta(z,g)}
+Sigma <- function(z){sigma(z,g)}
+WeierstrassP <- function(z){P(z,g)}
+
+
+###################################################
+### code chunk number 15: integrate
+###################################################
+jj <- integrate.contour(Zeta,u,udash)
+
+
+###################################################
+### code chunk number 16: maxdiff_integrate
+###################################################
+maxdiff(jj, 2*pi*1i)
+
+
+###################################################
+### code chunk number 17: abs_integrate
+###################################################
+abs(integrate.contour(WeierstrassP,u,udash))
+
+
+###################################################
+### code chunk number 18: jj_omega
+###################################################
+jj.omega <- half.periods(g=c(1+1i,2-3i))
+
+
+###################################################
+### code chunk number 19: calculate_wp_figure
+###################################################
+x <- seq(from=-4, to=4, len=n)
+y <- x
+z <- outer(x,1i*x, "+")
+f <- P(z, c(1+1i,2-3i))
+
+
+###################################################
+### code chunk number 20: wp_figure_file
+###################################################
+png("wp_figure.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 21: wp_figure_plot
+###################################################
+persp(x, y, limit(Re(f)), xlab="Re(z)",ylab="Im(z)",zlab="Re(P(z))",
+theta=30, phi=30, r=1e9, border=NA, shade=0.8, expand=0.6)
+
+
+###################################################
+### code chunk number 22: wp_figure_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 23: thallerfig_file
+###################################################
+png("thallerfig.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 24: thallerfig_plot
+###################################################
+par(pty="s")
+view(x,y,f,code=0,real.contour=FALSE, imag.contour=FALSE,drawlabel=FALSE,col="red",axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos = -4)
+axis(2,pos = -4)
+lines(x=c(-4,4),y=c(4,4))
+lines(y=c(-4,4),x=c(4,4))
+
+
+###################################################
+### code chunk number 25: thallerfig_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 26: sigma_green_calc
+###################################################
+x <- seq(from= -12, to=12, len=n)
+y <- x
+z <- outer(x, 1i*y, "+")
+f <- sigma(z, c(1+1i,2-3i))
+
+
+###################################################
+### code chunk number 27: sigma_green_file
+###################################################
+png("sigma_green.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 28: sigma_green_plot
+###################################################
+par(pty="s")
+view(x,y,f,scheme=4,real.contour=FALSE,drawlabels=FALSE,axes=FALSE, xlab="Re(z)",ylab="Im(z)")
+axis(1,pos= -12)
+axis(2,pos= -12)
+lines(x=c(-12,12),y=c(12,12))
+lines(y=c(-12,12),x=c(12,12))
+lines(x=c(-12,12),y=-c(12,12))
+lines(y=c(-12,12),x=-c(12,12))
+
+
+###################################################
+### code chunk number 29: sigma_green_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 30: calculate_zeta
+###################################################
+zeta.z <- zeta(z, c(1+1i,2-3i))
+
+
+###################################################
+### code chunk number 31: zetafig_file
+###################################################
+png("zetafig.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 32: zetafig_plot
+###################################################
+par(pty="s")
+view(x,y,zeta.z,scheme=0,real.contour=FALSE,drawlabels=FALSE,xlab="Re(z)",ylab="Im(z)")
+
+
+###################################################
+### code chunk number 33: zetafig_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 34: calculate_sn
+###################################################
+jj <- seq(from=-40,to=40,len=n)
+m <- outer(jj,1i*jj,"+")
+f <- sn(u=5-2i,m=m,maxiter=1000)
+
+
+###################################################
+### code chunk number 35: sn_figure_file
+###################################################
+png("sn_figure.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 36: sn_figure_plot
+###################################################
+par(pty="s")
+ view(jj,jj,f,scheme=0,r0=1/5,real=T,imag=F,levels=c(0,-0.4,-1),drawlabels=F,xlab="Re(m)",ylab="Im(m)")
+
+
+###################################################
+### code chunk number 37: sn_figure_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 38: stag_calc
+###################################################
+     f <- function(z){1i*z^2}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+
+
+###################################################
+### code chunk number 39: stag_point_file
+###################################################
+png("stag_point.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 40: stag_point_plot
+###################################################
+par(pty="s")
+view(x,y,f(z),nlevels=14,imag.contour=TRUE,real.cont=TRUE,scheme=-1, 
+     drawlabels=FALSE,
+     axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+d1 <- c(-0.1,0,0.1)
+d2 <- c( 0.1,0,0.1)
+lines(x=d1,y=1+d2)
+lines(x=d1,y=-1-d2)
+lines(x=1-d2,y=d1)
+lines(x=-1+d2,y=d1)
+
+
+###################################################
+### code chunk number 41: stag_point_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 42: two_calc
+###################################################
+     f <- function(z){1i*log((z-1.7+3i)*(z-1.7-3i)/(z+1-0.6i)/(z+1+0.6i))}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+
+
+###################################################
+### code chunk number 43: two_sources_two_sinks_file
+###################################################
+png("two_sources_two_sinks.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 44: two_sources_two_sinks_plot
+###################################################
+par(pty="s")
+view(x,y,f(z),nlevels=24,imag.contour=TRUE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+
+
+###################################################
+### code chunk number 45: two_sources_two_sinks_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 46: rect_calc3
+###################################################
+m <- 0.5
+K <- K.fun(m)
+iK <- K.fun(1-m)
+
+#b <- sn(1.8 + 0.8i, m=m) # 1.8 to the right and 0.8 up.
+#b <- 0 # middle bottom
+b <- sn(0 + 1i*iK/2,m=m)  #dead centre of the rectangle.
+#b <- -1 # lower left
+#b <- 1/sqrt(m) # top right
+#b <- -1/sqrt(m) # top left
+#b <- 1e9*1i # top centre
+
+
+a <- 1   #bottom right hand side corner
+
+
+f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+
+     x <- seq(from=-K,to=K,len=n)
+     y <- seq(from=0,to=iK,len=n)
+     z <- outer(x,1i*y,"+")
+    fsn <- f(sn(z,m=m))
+
+
+###################################################
+### code chunk number 47: rectangle_pot_flow_file
+###################################################
+png("rectangle_pot_flow.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 48: rectangle_pot_flow_plot
+###################################################
+view(x,y,fsn,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+rect(-K,0,K,iK,lwd=3)
+
+
+###################################################
+### code chunk number 49: rectangle_pot_flow_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 50: bacco_flow
+###################################################
+# Choose the size of the computational mesh:
+n <- n_BACCO
+
+# Choose the number of code observations for D1:
+n.code.obs <- 30
+
+# And the number of field observations for D2:
+n.field.obs <- 31
+
+# First, set up the D1 design matrix.  Recall that D1 is the set of code
+# observations, which here means the observations of fluid speed when
+# the sink is at a known, specified, position.
+
+set.seed(0)
+
+latin.hypercube <- function (n, d){
+  sapply(seq_len(d), function(...) { (sample(1:n) - 0.5)/n })
+}
+
+
+D1.elliptic <- latin.hypercube(n.code.obs , 4)
+colnames(D1.elliptic) <- c("x","y","x.sink","y.sink")
+D1.elliptic[,c(1,3)] <- (D1.elliptic[,c(1,3)] -0.5)*2
+#D1.elliptic[,c(2,4)] <- D1.elliptic[,c(2,4)] *iK
+
+# Now the D2 design matrix.  This holds the field observations:
+# observations of fluid speed when the sink is at its true, unknown
+# position.
+D2.elliptic <- latin.hypercube(n.field.obs , 2)
+colnames(D2.elliptic) <- c("x","y")
+D2.elliptic[,1] <- (D2.elliptic[,1] -0.5)*2
+
+
+# Now a function that, given x and y and x.sink and y.sink, returns
+# the log of the fluid speed at x,y:
+
+fluid.speed <- function(x.pos, y.pos, x.sink, y.sink){
+  
+  a <- 1   #bottom right hand side corner
+  b <- sn(x.pos/K + 1i*iK*y.pos,m=m)  #position (x.pos , y.pos)
+  f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+  
+  x <- seq(from=-K,to=K,len=n)
+  y <- seq(from=0,to=iK,len=n)
+  z <- outer(x,1i*y,"+")
+  potential <- f(sn(z,m=m))
+  
+  get.log.ke <- function(x,y,potential){
+    jj <- Re(potential)
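+    # jj.x and jj.y below are forward differences of Re(potential); for an
+    # analytic complex potential, the squared gradient of its real part
+    # equals the squared fluid speed (up to the grid spacing), hence the
+    # name 'kinetic.energy'.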
+    jj.x <- cbind(jj[,-1]-jj[,-ncol(jj)],0)
+    jj.y <- rbind(jj[-1,]-jj[-nrow(jj),],0)
+    kinetic.energy <- jj.x^2 + jj.y^2
+    n.x <- round(n * (x-(-1))/2)
+    n.y <- round(n * y)
+    return(log(kinetic.energy[n.x , n.y]+0.01))
+  }
+
+  return(get.log.ke(x.pos,y.pos,potential))
+}
+
+# now fill in code outputs y:
+y.elliptic <- rep(NA,nrow(D1.elliptic))
+for(i in 1:length(y.elliptic)){
+  jj <- D1.elliptic[i,,drop=TRUE]
+  y.elliptic[i] <- fluid.speed(jj[1],jj[2],jj[3],jj[4])
+}
+
+
+# Now do the field observations; here the sink is at its true position,
+# the centre of the rectangle:
+
+z.elliptic <- rep(NA,nrow(D2.elliptic))
+for(i in 1:length(z.elliptic)){
+  jj <- D2.elliptic[i,,drop=TRUE]
+  z.elliptic[i] <- fluid.speed(jj[1],jj[2],0,0.5)
+}
+
+# Create design matrix plus observations for didactic purposes:
+D1 <- round(cbind(D1.elliptic,observation=y.elliptic),2)
+D2 <- round(cbind(D2.elliptic,observation=z.elliptic),2)
+
+
+# create a data vector:
+d.elliptic <- c(y.elliptic , z.elliptic)
+
+#now a h1.toy() equivalent:
+h1.elliptic <- function(x){
+  out <- c(1,x[1])
+}
+
+#now a H1.toy() equivalent:
+ H1.elliptic <- 
+function (D1) 
+{
+    if (is.vector(D1)) {
+        D1 <- t(D1)
+    }
+    out <- t(apply(D1, 1, h1.elliptic))
+    colnames(out)[1] <- "h1.const"
+    return(out)
+}
+                      
+h2.elliptic <-
+  function(x){
+    c(1,x[1]) 
+  }
+
+H2.elliptic <-
+  function(D2){
+    if (is.vector(D2)) {
+      D2 <- t(D2)
+    }
+    out <- t(apply(D2, 1, h2.elliptic))
+    colnames(out)[1] <- "h2.const"
+    return(out)
+  }
+
+
+#Now an extractor function:
+extractor.elliptic <- 
+function (D1) 
+{
+    return(list(x.star = D1[, 1:2, drop = FALSE], t.vec = D1[, 
+        3:4, drop = FALSE]))
+}
+
+# Now a whole bunch of stuff to define a phi.fun.elliptic()
+# and, after that, to call it:
+phi.fun.elliptic <- 
+function (rho, lambda, psi1, psi1.apriori, psi2, psi2.apriori, 
+    theta.apriori, power) 
+{
+    "pdm.maker.psi1" <- function(psi1) {
+        jj.omega_x <- diag(psi1[1:2])
+        rownames(jj.omega_x) <- names(psi1[1:2])
+        colnames(jj.omega_x) <- names(psi1[1:2])
+        jj.omega_t <- diag(psi1[3:4])
+        rownames(jj.omega_t) <- names(psi1[3:4])
+        colnames(jj.omega_t) <- names(psi1[3:4])
+        sigma1squared <- psi1[5]
+        return(list(omega_x = jj.omega_x, omega_t = jj.omega_t, 
+            sigma1squared = sigma1squared))
+    }
+    "pdm.maker.psi2" <- function(psi1) {
+        jj.omegastar_x <- diag(psi2[1:2])
+        sigma2squared <- psi2[3]
+        return(list(omegastar_x = jj.omegastar_x, sigma2squared = sigma2squared))
+    }
+    jj.mean <- theta.apriori$mean
+    jj.V_theta <- theta.apriori$sigma
+    jj.discard.psi1 <- pdm.maker.psi1(psi1)
+    jj.omega_t <- jj.discard.psi1$omega_t
+    jj.omega_x <- jj.discard.psi1$omega_x
+    jj.sigma1squared <- jj.discard.psi1$sigma1squared
+    jj.discard.psi2 <- pdm.maker.psi2(psi2)
+    jj.omegastar_x <- jj.discard.psi2$omegastar_x
+    jj.sigma2squared <- jj.discard.psi2$sigma2squared
+    jj.omega_t.upper <- chol(jj.omega_t)
+    jj.omega_t.lower <- t(jj.omega_t.upper)
+    jj.omega_x.upper <- chol(jj.omega_x)
+    jj.omega_x.lower <- t(jj.omega_x.upper)
+    jj.a <- solve(solve(jj.V_theta) + 2 * jj.omega_t, solve(jj.V_theta, 
+        jj.mean))
+    jj.b <- t(2 * solve(solve(jj.V_theta) + 2 * jj.omega_t) %*% 
+        jj.omega_t)
+    jj.c <- jj.sigma1squared/sqrt(det(diag(nrow = nrow(jj.V_theta)) + 
+        2 * jj.V_theta %*% jj.omega_t))
+    jj.A <- solve(jj.V_theta + solve(jj.omega_t)/4)
+    jj.A.upper <- chol(jj.A)
+    jj.A.lower <- t(jj.A.upper)
+    list(rho = rho, lambda = lambda, psi1 = psi1, psi1.apriori = psi1.apriori, 
+        psi2 = psi2, psi2.apriori = psi2.apriori, theta.apriori = theta.apriori, 
+        power = power, omega_x = jj.omega_x, omega_t = jj.omega_t, 
+        omegastar_x = jj.omegastar_x, sigma1squared = jj.sigma1squared, 
+        sigma2squared = jj.sigma2squared, omega_x.upper = jj.omega_x.upper, 
+        omega_x.lower = jj.omega_x.lower, omega_t.upper = jj.omega_t.upper, 
+        omega_t.lower = jj.omega_t.lower, a = jj.a, b = jj.b, 
+        c = jj.c, A = jj.A, A.upper = jj.A.upper, A.lower = jj.A.lower)
+}
+
+# OK, that's the function defined.  Now to create some jj.* variables
+# to call it:
+
+jj.psi1 <- c(rep(1,4),0.3)
+names(jj.psi1)[1:4] <- colnames(D1.elliptic)
+names(jj.psi1)[5] <- "sigma1squared"
+
+jj.mean.psi1 <- rep(1,5)
+names(jj.mean.psi1) <- names(jj.psi1)
+
+jj.sigma.psi1 <- diag(0.1,nrow=5)
+rownames(jj.sigma.psi1) <- names(jj.psi1)
+colnames(jj.sigma.psi1) <- names(jj.psi1)
+
+jj.psi2 <- c(1,1,0.3)
+names(jj.psi2)[1:2] <- colnames(D2.elliptic)
+names(jj.psi2)[3] <- "sigma2squared"
+
+jj.mean.psi2 <- rep(1,4)
+names(jj.mean.psi2) <- c("x.sink", "y.sink","rho","lambda")
+
+jj.sigma.psi2 <- diag(0.1,4)
+rownames(jj.sigma.psi2) <- names(jj.mean.psi2)
+colnames(jj.sigma.psi2) <- names(jj.mean.psi2)
+
+jj.mean.th <- c(1,0.5)
+names(jj.mean.th) <- colnames(D1.elliptic)[3:4]
+
+jj.sigma.th <- diag(rep(1,2))
+rownames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+colnames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+
+# Now call phi.fun.elliptic():
+phi.elliptic <-
+  phi.fun.elliptic(
+                   rho=1,
+                   lambda=0.1,
+                   psi1=jj.psi1,
+                   psi2=jj.psi2,
+                   psi1.apriori=list(mean=jj.mean.psi1, sigma=jj.sigma.psi1),
+                   psi2.apriori=list(mean=jj.mean.psi2, sigma=jj.sigma.psi2),
+                   theta.apriori=list(mean=jj.mean.th, sigma=jj.sigma.th),
+                   power=2
+                   )
+
+# Now an E.theta.elliptic():
+E.theta.elliptic <- 
+function (D2 = NULL, H1 = NULL, x1 = NULL, x2 = NULL, phi, give.mean = TRUE) 
+{
+    if (give.mean) {
+        m_theta <- phi$theta.apriori$mean
+        return(H1(D1.fun(D2, t.vec = m_theta)))
+    }
+    else {
+        out <- matrix(0, 2,2)
+        rownames(out) <- c("h1.const","x")
+        colnames(out) <- c("h1.const","x")
+        return(out)
+    }
+}
+
+#Now an Edash.theta.elliptic().  Because the basis vector is not a
+#function of theta, this is a bit academic as we can use a function
+#that is identical to Edash.theta.toy():
+
+Edash.theta.elliptic <-
+function (x, t.vec, k, H1, fast.but.opaque = FALSE, a = NULL, 
+    b = NULL, phi = NULL) 
+{
+    if (fast.but.opaque) {
+        edash.mean <- a + crossprod(b, t.vec[k, ])
+    }
+    else {
+        V_theta <- phi$theta.apriori$sigma
+        m_theta <- phi$theta.apriori$mean
+        omega_t <- phi$omega_t
+        edash.mean <- solve(solve(V_theta) + 2 * omega_t, solve(V_theta, 
+            m_theta) + 2 * crossprod(omega_t, t.vec[k, ]))
+    }
+    jj <- as.vector(edash.mean)
+    names(jj) <- rownames(edash.mean)
+    edash.mean <- jj
+    return(H1(D1.fun(x, edash.mean)))
+}
+
+
+
+# Define a wrapper for equation 8:
+# First, calculate the constant to subtract to ensure that
+# the support has a maximum of about zero: 
+
+maximum.likelihood.support <- p.eqn8.supp(theta=c(0,1/2), D1=D1.elliptic, D2=D2.elliptic, H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic, include.prior=FALSE, lognormally.distributed=FALSE, return.log=TRUE, phi=phi.elliptic)
+
+support <- function(x){
+p.eqn8.supp(theta=x, D1=D1.elliptic, D2=D2.elliptic, H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic, include.prior=FALSE, lognormally.distributed=FALSE, return.log=TRUE, phi=phi.elliptic) - maximum.likelihood.support
+}
+
+# Define a local function called optim() for aesthetic reasons (i.e., it
+# improves the appearance of the call to optim()):
+
+optim <- 
+  function(par,fn){
+    stats::optim(par=par,fn=fn,control=list(fnscale = -1))$par
+  }
+
+
+
+###################################################
+### code chunk number 51: head_D1
+###################################################
+head(D1)
+
+
+###################################################
+### code chunk number 52: head_D2
+###################################################
+head(D2)
+
+
+###################################################
+### code chunk number 53: calc_b_sn
+###################################################
+b <- sn(D1[1,3] + 1i*D1[1,4],m=m) #point corresponding to first line of D1
+fsnz2 <- f(sn(z,m=m))
+
+
+###################################################
+### code chunk number 54: code_obs_file
+###################################################
+png("code_obs.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 55: code_obs_plot
+###################################################
+view(x,y,fsnz2,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D1[1,1],y=D1[1,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+
+
+###################################################
+### code chunk number 56: code_obs_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 57: calc_b2
+###################################################
+b <- sn(0 + 1i*iK/2,m=m) 
+fsnzz <- f(sn(z,m=m))
+
+
+###################################################
+### code chunk number 58: true_flow_file
+###################################################
+png("true_flow.png",width=800,height=800)
+
+
+###################################################
+### code chunk number 59: true_flow_plot
+###################################################
+view(x,y,fsnzz,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D2[,1],y=D2[,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+
+
+###################################################
+### code chunk number 60: true_flow_close
+###################################################
+null <- dev.off()
+
+
+###################################################
+### code chunk number 61: support
+###################################################
+support(c(0,1/2)) #centre of the rectangle
+support(c(-1,1))   #top left corner
+
+
+###################################################
+### code chunk number 62: mle_calc
+###################################################
+mle <- optim(c(0,1/2),support)
+
+
+###################################################
+### code chunk number 63: print_mle
+###################################################
+mle
+
+
+###################################################
+### code chunk number 64: support_of_mle
+###################################################
+support(mle)
+
+
diff --git a/inst/doc/ellipticpaper.Rnw b/inst/doc/ellipticpaper.Rnw
new file mode 100644
index 0000000..0b9d095
--- /dev/null
+++ b/inst/doc/ellipticpaper.Rnw
@@ -0,0 +1,1421 @@
+\documentclass[nojss]{jss}
+
+\usepackage{dsfont}
+\usepackage{bbm}
+\usepackage{amsfonts}
+\usepackage{wasysym}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%% just as usual
+\author{Robin K. S. Hankin}
+\title{Introducing \pkg{elliptic}, an \proglang{R} package for elliptic and
+  modular functions}
+%\VignetteIndexEntry{A vignette for the elliptic package}
+%% for pretty printing and a nice hypersummary also set:
+%% \Plainauthor{Achim Zeileis, Second Author} %% comma-separated
+\Plaintitle{Introducing elliptic, an R package for elliptic and
+  modular functions}
+\Shorttitle{Elliptic functions with \proglang{R}}
+
+%% an abstract and keywords
+\Abstract{
+
+This paper introduces the \pkg{elliptic} package of \proglang{R} routines, for
+numerical calculation of elliptic and related functions.  Elliptic
+functions furnish interesting and instructive examples of many ideas
+of complex analysis, and package \pkg{elliptic} illustrates these
+numerically and visually.  A statistical application in fluid
+mechanics is presented.
+
+An earlier version of this vignette was published as~\citet{hankin2006}. 
+}
+
+\Keywords{Elliptic functions, modular functions, Weierstrass elliptic
+functions, visualization of complex functions}
+
+
+
+
+\Address{
+  Robin K. S. Hankin\\
+  Auckland University of Technology\\
+  2-14 Wakefield Street\\
+  Auckland\\
+  New Zealand\\
+  E-mail: \email{hankin.robin at gmail.com}
+}
+
+
+%% need no \usepackage{Sweave.sty}
+\SweaveOpts{echo=FALSE}
+\begin{document}
+
+
+<<requirepackage,echo=FALSE,print=FALSE>>=
+require(elliptic,quietly=TRUE)
+@ 
+
+
+<<setOverallImageQuality>>=
+n <- 400
+n_BACCO <- 40
+@ 
+
+\section{Introduction}
+
+The elliptic functions crop up here and there in diverse areas of
+applied mathematics such as cosmology~\citep{kraniotis2002}, chemical
+engineering~\citep{koopman1991}, dynamical systems~\citep{kotus2003},
+and quantum mechanics~\citep{chow2002}; here they are applied to fluid
+mechanics~\citep{johnson2004,johnson2005}.  They also provide
+interesting and instructive non-elementary examples of many results in
+complex analysis such as Cauchy's integral theorem and the residue
+theorem.
+
+In this paper I introduce \pkg{elliptic}, a new \proglang{R} package for
+numerical calculation of Weierstrass and Jacobi elliptic functions,
+theta functions and modular functions.  The emphasis is on efficient
+numerical calculation, and informative visualization techniques.
+
+The package is available on CRAN, \url{http://cran.R-project.org/}
+\citep{rcore2005}. 
+
+\section{Elliptic functions}\label{section:introduction}
+
+This section gives a very brief introduction to elliptic functions.
+For more detail and rigorous derivations, the reader is referred to
+the classic literature: the standard reference would
+be~\cite{whittaker1952}.  \cite{chandrasekharan1985} approaches the
+field from a more modern perspective, and \cite{abramowitz1965}
+provide the definitive reference work for the case of real invariants.
+
+A meromorphic function~$f$ is said to be elliptic
+if~$\exists\,\omega_1,\omega_2\in\mathbbm{C}$
+with~$\omega_2/\omega_1\in\mathbbm{C}\backslash\mathbbm{R}$ such that
+
+\begin{equation}
+f(z)=f(z+2m\omega_1+2n\omega_2)
+\end{equation}
+whenever~$f(z)$ is defined and~$m,n\in\mathbbm{Z}$.  Notation in this
+paper is consistent with that of~\citet{abramowitz1965}; $\omega_1$
+and~$\omega_2$ are called the {\em half periods}.  In 1862,
+Weierstrass introduced his~$\wp$ function which is defined as
+\begin{equation}\label{direct.sum}
+\wp(z)=
+\frac{1}{z^2}+
+\sum_{m,n\in\mathbbm{Z}\atop m,n\neq 0}
+\left\{
+   \frac{1}{\left(z-2m\omega_1-2n\omega_2\right)^2}
+  -\frac{1}{\left(  2m\omega_1+2n\omega_2\right)^2}
+\right\}.
+\end{equation}
+The~$\wp$ function is, in a well defined sense, the simplest
+nontrivial elliptic function~\citep{whittaker1952}.  Near the origin it
+has a Laurent expansion of the form
+\begin{equation}
+\wp(z)-z^{-2}=\frac{1}{20}g_2z^2+\frac{1}{28}g_3z^4+O(z^6)
+\end{equation}
+with
+\begin{equation}
+g_2=60{\sum}'\frac{1}{\left(2m\omega_1+2n\omega_2\right)^4},
+\qquad
+g_3=140{\sum}'\frac{1}{\left(2m\omega_1+2n\omega_2\right)^6},
+\end{equation}
+where a prime indicates summation over~$\mathbbm{Z}^2$
+excluding~$(m,n)=(0,0)$.  For reasons to be made clear in
+section~\ref{section.unimodularity}, $g_2$ and~$g_3$ are known as the
+{\em invariants}.  Other equivalent forms for~$\wp$ include its
+differential equation
+\begin{equation}\label{P.differential.eqn.definition}
+\left(\frac{d\wp}{dz}\right)^2=4\wp^3-g_2\wp-g_3
+\end{equation}
+and the relation
+\begin{equation}\label{P.integral.definition}
+z=\int_{t=w}^\infty\frac{dt}{\sqrt{4t^3-g_2t-g_3}}
+\end{equation}
+which is equivalent to~$w=\wp(z)$.
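+(To see that equations~\ref{P.differential.eqn.definition}
+and~\ref{P.integral.definition} are consistent, differentiate the
+latter with respect to~$w$, giving
+\[
+\frac{dz}{dw}=\frac{-1}{\sqrt{4w^3-g_2w-g_3}},
+\]
+so that~$\left(dw/dz\right)^2=4w^3-g_2w-g_3$, which is
+equation~\ref{P.differential.eqn.definition} with~$w=\wp(z)$.)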
+
+Related functions include the zeta function~$\zeta(z)$, defined by 
+\begin{equation}\label{zeta.definition}
+\frac{d\zeta(z)}{dz}=-\wp(z)
+\end{equation}
+and the sigma function~$\sigma(z)$, defined by
+\begin{equation}\label{sigma.definition}
+\frac{d\log\sigma(z)}{dz}=\zeta(z),\qquad{\lim_{\mbox{\tiny $z\longrightarrow
+0$}}}\left[\frac{\sigma(z)}{z}\right]=1
+\end{equation}
+(neither~$\sigma(z)$ nor~$\zeta(z)$ is elliptic).  It may be
+shown\label{zeta.analytic} that~$\zeta(z)$ is analytic except for
+points on the lattice of periods, at which it has simple poles with
+residue~1.  One classic result is due to Legendre:
+if~$\omega_1,\omega_2$ is a pair of basic periods\footnote{A pair of
+basic periods is one that generates the period lattice.  Basic periods
+are not unique as many pairs of periods may generate the same lattice.
+However, there is one pair of basic periods, the {\em fundamental}
+periods that are, in a well-defined sense,
+optimal~\citep{chandrasekharan1985}.},
+with~$\rm{Im}(\omega_2/\omega_1)>0$, then
+\begin{equation}
+\eta_1\omega_2-\eta_2\omega_1=\pi i\label{legendre}
+\end{equation}
+where~$\eta_1=\zeta(\omega_1)$ and~$\eta_2=\zeta(\omega_2)$.
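+
+Equation~\ref{sigma.definition} is straightforward to check
+numerically using package functions that appear later in this paper.
+A minimal sketch (not evaluated here; the two printed values should
+agree to several significant figures):
+
+\begin{Schunk}
+\begin{Sinput}
+> g <- c(1+1i, 2-3i)
+> z <- 1.1 + 0.3i
+> h <- 1e-6
+> (sigma(z+h, g) - sigma(z, g))/(h * sigma(z, g))  # approximates d(log sigma)/dz
+> zeta(z, g)
+\end{Sinput}
+\end{Schunk}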
+
+\subsection{Jacobian elliptic functions}
+Jacobi approached the description of elliptic functions from a
+different perspective~\citep{weisstein2005}.  Given~$m=k^2$ and~$m_1$
+with~$m+m_1=1$, Jacobi showed that if
+\[
+u=\int_{t=0}^\phi\frac{dt}{\sqrt{(1-t^2)(1-mt^2)}}
+\]
+the functions~${\rm sn}(u,k)$, ${\rm cn}(u,k)$ and~${\rm dn}(u,k)$
+defined by
+\begin{equation}\label{sn.definition}
+{\rm sn} u=\sin\phi,\qquad
+{\rm cn} u=\cos\phi,\qquad
+{\rm dn} u=\sqrt{1-k^2\sin^2\phi}
+\end{equation}
+are elliptic with periods 
+\begin{equation}
+K=\int_{\theta=0}^{\pi/2}\frac{d\theta}{\sqrt{1-m\sin^2\theta}}
+\end{equation}
+and
+\begin{equation}
+iK'=i\int_{\theta=0}^{\pi/2}\frac{d\theta}{\sqrt{1-m_1\sin^2\theta}}.
+\end{equation}
+The Jacobian elliptic functions are encountered in a variety of
+contexts and bear a simple analytical relation with the
+Weierstrass~$\wp$ function.
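+
+A quick numerical sanity check (a sketch, not evaluated here), using
+the package functions \code{sn()} and \code{K.fun()}: the identity
+${\rm sn}\,K=1$ means that
+
+\begin{Schunk}
+\begin{Sinput}
+> m <- 0.5
+> sn(K.fun(m), m = m)
+\end{Sinput}
+\end{Schunk}
+
+should return a value very close to~1.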
+
+\section{Numerical evaluation and Jacobi's theta functions}
+
+Although equation~\ref{direct.sum} is absolutely convergent, it
+converges too slowly to be of use in practical work, and an alternative
+definition is needed.
+
+Jacobi presented his four theta functions in 1829 and, although they
+have interesting properties in their own right, here they are used to
+provide efficient numerical methods for calculation of the elliptic
+functions above.  They are defined as follows:
+
+\begin{eqnarray}\label{theta.defn}
+\theta_1(z,q)&=&2q^{1/4}\sum_{n=0}^\infty(-1)^nq^{n(n+1)}\sin(2n+1)z\\
+\theta_2(z,q)&=&2q^{1/4}\sum_{n=0}^\infty q^{n(n+1)}\cos(2n+1)z\\
+\theta_3(z,q)&=&1+2\sum_{n=1}^\infty q^{n^2}\cos 2nz\\
+\theta_4(z,q)&=&1+2\sum_{n=1}^\infty(-1)^n q^{n^2}\cos 2nz
+\end{eqnarray}
+It may be seen that, provided~$|q|<1$, the series converges for
+all~$z\in\mathbbm{C}$.  Indeed, the convergence is very rapid: the
+number of correct significant figures goes as the square of the number
+of terms.  It should be noted that there are different notations in
+use, both for the four function names, and for the independent
+variables.
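+
+The rapid convergence is easy to observe from within the package (a
+minimal sketch, not evaluated here): the \code{give.n} argument makes
+the theta functions return the number of terms used alongside the
+answer, as in
+
+\begin{Schunk}
+\begin{Sinput}
+> theta3(z = 0.2 + 0.3i, q = 0.1, give.n = TRUE)
+\end{Sinput}
+\end{Schunk}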
+
+All the functions described in section~\ref{section:introduction} may
+be expressed in terms of the theta functions.  This is the default
+method in \pkg{elliptic}, although alternative algorithms are
+implemented where possible to provide a numerical and notational
+check.
+
+For example, Weierstrass's~$\wp$ function is given by
+\begin{equation}\label{P.in.terms.of.theta}
+\wp(z)= 
+\frac{\pi^2}{4\omega_1^2}\left(
+   \frac{\theta_1'(0,q)\theta_2(v,q)}{\theta_2(0,q)\theta_1(v,q)}
+\right)^2
+\end{equation}
+where~$q=e^{i\pi\omega_2/\omega_1}$; the other functions have similar
+theta function definitions.
+
+<<require_packages, echo=FALSE,print=FALSE>>=
+<<results=hide>>=
+require(elliptic)
+require(emulator)
+require(calibrator)
+@ 
+
+\section[Package ''elliptic'' in use]{Package \pkg{elliptic} in use}
+
+This section shows \pkg{elliptic} being used in a variety of contexts.
+First, a number of numerical verifications of the code are presented;
+then, elliptic and related functions are visualized using the function
+\code{view()}; and finally, the package is used to calculate flows
+occurring in an oceanographic context.
+
+The primary function in package \pkg{elliptic} is~\code{P()}: this
+calculates the Weierstrass~$\wp$ function, and may take named
+arguments that specify either the invariants~\code{g} or half
+periods~\code{Omega}:
+<<simple_usage_of_P,echo=TRUE,print=FALSE>>=
+z <- 1.9+1.8i
+P(z,g=c(1,1i))
+P(z,Omega=c(1,1i))
+@ 
+
+\subsection{Numerical verification}
+
+Work in the field of elliptic functions is very liable to
+mistakes\footnote{\cite{abramowitz1965} state that there is a
+``bewildering'' variety of notations in use; the situation has become
+more confusing in the intervening 40 years.}, and package
+\pkg{elliptic} includes a number of numerical checks to guard against
+notational inexactitude.  These checks generally use the convenient
+(trivial) function \code{maxdiff()} that shows the maximum absolute
+difference between its two arguments:
+<<define_maxdiff,echo=TRUE,print=FALSE>>=
+maxdiff <- function(x,y){max(abs(x-y))}
+@ 
+
+For example, we may compare the output of \code{P()}, which uses
+equation~\ref{P.in.terms.of.theta}, against the straightforward
+Laurent expansion, used by \code{P.laurent()}:
+
+<<laurent,echo=TRUE,print=FALSE>>=
+g <- c(3,2+4i)
+z <- seq(from=1,to=0.4+1i,len=34)
+<<maxdiff_laurent,echo=TRUE,print=TRUE>>=
+maxdiff(P(z,g), P.laurent(z,g))
+@
+
+showing reasonable agreement; note that function \code{P()} uses the
+conceptually distinct theta function formula of
+equation~\ref{P.in.terms.of.theta}.  Package \pkg{elliptic} includes a
+large number of such numerical verification tests in the \code{test}
+suite provided in the package, but perhaps more germane is the
+inclusion of named identities appearing in \cite{abramowitz1965}.  For
+example, consider function~\code{e18.10.9()}, named for the equation
+number of the identity appearing on page 650.  This function returns
+the difference between the (algebraically identical) left and right
+hand side of three grouped identities:
+\begin{eqnarray}
+  12\omega_1^2e_1 &=& \hphantom{-}\pi^2\left[\theta_3^4(0,q)+\theta_4^4(0,q)\right]\nonumber\\
+  12\omega_1^2e_2 &=& \hphantom{-}\pi^2\left[\theta_2^4(0,q)-\theta_4^4(0,q)\right]\\
+  12\omega_1^2e_3 &=&           - \pi^2\left[\theta_2^4(0,q)+\theta_3^4(0,q)\right]\nonumber
+\end{eqnarray}
+where~$q=e^{-\pi K'/K}$.  From the manpage:
+
+<<abs_e18.10.9,echo=TRUE,print=TRUE>>=
+abs( e18.10.9(parameters(g=g)))
+@
+again showing reasonably accurate numerical results, but perhaps
+more importantly explicitly verifying that the notational scheme used
+is internally consistent.
+
+Although the examples above use the invariants~\code{g2} and \code{g3}
+to define the elliptic function and its periods, sometimes the
+fundamental periods are known and the invariants are desired.  This is
+done by function \code{g.fun()}, which takes the fundamental periods
+as arguments and returns the two invariants~$g_2$ and~$g_3$.  Observe
+that there are many pairs of basic periods that generate the same
+lattice---see figure~\ref{latplot}---but it is usual to specify the
+unique {\em fundamental periods} as this pair usually has desirable
+numerical convergence properties.
+
+\begin{figure}[htbp]
+  \begin{center}
+<<lattice_figure,fig=TRUE>>=
+jj <- parameters(g=c(1+1i,2-3i))$Omega
+latplot(jj,xlim=c(-4,4),ylim=c(-4,4),xlab="Re(z)",
+     ylab="Im(z)")
+polygon(Re(c(jj[1],sum(jj),jj[2],0)),
+        Im(c(jj[1],sum(jj),jj[2],0)),lwd=2,col="gray90",pch=16,cex=3)
+
+kk <- -c(3*jj[1] + 2*jj[2] , jj[1] + jj[2])  # det(matrix(c(3,2,1,1),2,2,TRUE)) == 1
+
+polygon(Re(c(kk[1],sum(kk),kk[2],0)),
+        Im(c(kk[1],sum(kk),kk[2],0)),lwd=2,col="gray30",pch=16,cex=3)
+@
+\caption{The\label{latplot} lattice generated by~$\wp(z;1+i,2-3i)$;
+  fundamental period parallelogram shown in light gray and a basic
+  period parallelogram shown in darker gray}
+  \end{center}
+\end{figure}
+
+\subsubsection{Unimodularity}\label{section.unimodularity}
+Many functions of the package are {\em unimodular}.  The
+invariants~$g_2$ and~$g_3$ are defined in terms of a pair of basic
+periods~$\omega_1$ and~$\omega_2$.  However, any pair of basic periods
+should have the same invariants, because any pair of basic periods
+will define the same elliptic function (hence the name).  Basic period
+pairs are related by a unimodular transformation:
+if~$\omega_1,\omega_2$ and~$\tilde{\omega}_1,\tilde{\omega}_2$ are two
+pairs of basic periods then there exist integers~$a,b,c,d$
+with~$ad-bc=1$ and
+\[
+\left(
+\begin{array}{cc}
+a&b\\
+c&d
+\end{array}
+\right)
+\left(
+\!\!
+\begin{array}{c}
+\omega_1\\
+\omega_2
+\end{array}
+\!\!
+\right)=
+\left(\!\!
+\begin{array}{c}
+\tilde{\omega}_1\\
+\tilde{\omega}_2
+\end{array}
+\!\!
+\right)
+\]
+
+Formally, a
+unimodular function~$f(\cdot,\cdot)$ is one with arity~2---it is
+conventional to write~$f(\mathbf{v})=f(a,b)$---and for which
+\begin{equation}
+f(\mathbf{v})=f(\mathbf{M}\mathbf{v})\end{equation} where~$\mathbf{M}$
+is unimodular: that is, an integer matrix with a determinant of unity.
+In this context, unimodular matrices (and the transformations they
+define) are interesting because any two pairs of basic periods are
+related by a unimodular transformation.
+
+The package includes
+functions that generate unimodular matrices.  The underlying function
+is \code{congruence()}, which generates~$2\times 2$ integer matrices
+with a determinant of~1, given the first row.  For example:
+<<congruence,echo=TRUE,print=TRUE>>=
+M <- congruence(c(4,9))
+@
+(observe that the determinant of~$\mathbf{M}$ is unity) and thus we
+may verify the unimodularity of, for example, \code{g.fun()} by
+evaluating the invariants for a pair of fundamental periods, and
+comparing this with the invariants calculated for a pair of basic
+periods that are related to the fundamental periods by a unimodular
+transformation (here~$\mathbf{M}$).  In \proglang{R} idiom:
+<<define_o,echo=TRUE,print=FALSE>>=
+o <- c(1,1i)
+<<maxdiff_o,echo=TRUE,print=TRUE>>=
+maxdiff(g.fun(o), g.fun(M %*% o,maxiter=800))
+@
+showing that the invariants for period pair~$o=(1,i)^T$ are almost
+identical to those for period
+pair~$o'=\mathbf{M}o=(4+9i,3+7i)^T$.  Observe that the latter
+evaluation requires many more iterations for accurate numerical
+evaluation: this behaviour is typically encountered when considering
+periods whose ratio is close to the real axis.
+
+In addition, function \code{unimodular()} generates unimodular
+matrices systematically, and function \code{unimodularity()} checks
+for a function's being unimodular.
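+For instance (not run here), the determinant condition may be checked
+directly:
+
+\begin{Schunk}
+\begin{Sinput}
+> u <- unimodular(5)
+> range(apply(u, 3, det))   # should be c(1, 1), up to rounding
+\end{Sinput}
+\end{Schunk}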
+
+\subsubsection{Contour integration and the residue theorem}
+
+As noted in section~\ref{zeta.analytic}, the zeta function~$\zeta(z)$
+possesses a simple pole of residue~1 at the origin.  The residue
+theorem would imply that
+\[
+\varoint\zeta(z)\,dz=2\pi i
+\]
+when the contour is taken round a path that encircles the origin but
+no other poles.  This may be verified numerically using
+\pkg{elliptic}'s \code{myintegrate} suite of functions, which
+generalize the \pkg{stats} package's \code{integrate()} function to
+the complex plane.  Here, function \code{integrate.contour()} is used
+to integrate round the unit circle.  This function takes three
+arguments: first, the function to be integrated; second, a function
+that describes the contour to be integrated along; and third, a
+function that describes the derivative of the contour.  We may now
+integrate over a closed loop, using arguments~\code{u}
+and~\code{udash} which specify a contour following the unit circle:
+
+<<u_udash,echo=FALSE,print=FALSE>>=
+u <- function(x){exp(pi*2i*x)}
+udash <- function(x){pi*2i*exp(pi*2i*x)}
+Zeta <- function(z){zeta(z,g)}
+Sigma <- function(z){sigma(z,g)}
+WeierstrassP <- function(z){P(z,g)}
+@ 
+
+<<integrate,echo=TRUE,print=FALSE>>=
+jj <- integrate.contour(Zeta,u,udash)
+<<maxdiff_integrate,echo=TRUE,print=TRUE>>=
+maxdiff(jj, 2*pi*1i)
+@
+showing reasonable numerical accuracy.  Compare Weierstrass's~$\wp$
+function, which has a second order pole at the origin:
+<<abs_integrate,echo=TRUE,print=TRUE>>=
+abs(integrate.contour(WeierstrassP,u,udash))
+@
+
+\subsubsection[The PARI system]{The \proglang{PARI} system}
+Perhaps the most convincing evidence for numerical accuracy and
+consistency of notation in the software presented here is provided by
+comparison of the package's results with that of
+\proglang{PARI}~\citep{batut2000}.  The \proglang{PARI} system is an open-source project
+aimed at number theorists, with an emphasis on pure mathematics; it
+includes some elliptic function capability.  Function \code{P.pari()}
+of package \pkg{elliptic} calls the \code{pari} system directly to
+evaluate elliptic functions from within an \proglang{R} session, enabling quick
+verification:
+
+\begin{Schunk}
+\begin{Sinput}
+> omega <- c(1,1i)
+\end{Sinput}
+\end{Schunk}
+\begin{Schunk}
+\begin{Sinput}
+> z <- seq(from=pi,to=pi*1i,len=10)
+\end{Sinput}
+\end{Schunk}
+\begin{Schunk}
+\begin{Sinput}
+> maxdiff(P.pari(z,Omega=omega),  P(z,params=parameters(Omega=omega)))
+\end{Sinput}
+\begin{Soutput}
+[1] 2.760239e-14
+\end{Soutput}
+\end{Schunk}
+
+again showing reasonable agreement, this time between two independent
+computational systems.
+
+\subsection{Visualization of complex functions}
+
+In the following, a Weierstrass elliptic function with invariants
+of~$1+i$ and~$2-3i$ will be considered.  The half periods
+$\omega_1,\omega_2$ are first evaluated:
+
+<<jj_omega,echo=TRUE,print=FALSE>>=
+jj.omega <- half.periods(g=c(1+1i,2-3i))
+@ 
+and these may be visualized by using \code{latplot()}, as in
+figure~\ref{latplot}.  Figure~\ref{P.persp.re} shows the real part of
+such a function, shown over part of the complex plane, and
+figure~\ref{P.view} shows the same function using the \code{view()}
+function.
+
+<<calculate_wp_figure,echo=FALSE,print=FALSE,cache=TRUE>>=
+x <- seq(from=-4, to=4, len=n)
+y <- x
+z <- outer(x,1i*x, "+")
+f <- P(z, c(1+1i,2-3i))
+@ 
+
+
+%% Thanks to Dario Strbenac for the following structure
+<<wp_figure_file>>=   
+png("wp_figure.png",width=800,height=800)
+@ 
+
+<<label=wp_figure_plot>>=
+persp(x, y, limit(Re(f)), xlab="Re(z)",ylab="Im(z)",zlab="Re(P(z))",
+theta=30, phi=30, r=1e9, border=NA, shade=0.8, expand=0.6)
+@ 
+
+<<label=wp_figure_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{wp_figure.png}
+    \caption{Real part of~$\wp(z;1+i,2-3i)$.  Note \label{P.persp.re}
+    the second order poles at each lattice point}
+  \end{center}
+\end{figure}
+
+
+<<thallerfig_file>>=
+png("thallerfig.png",width=800,height=800)
+@ 
+
+<<label=thallerfig_plot>>=
+par(pty="s")
+view(x,y,f,code=0,real.contour=FALSE, imag.contour=FALSE,drawlabel=FALSE,col="red",axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos = -4)
+axis(2,pos = -4)
+lines(x=c(-4,4),y=c(4,4))
+lines(y=c(-4,4),x=c(4,4))
+@ 
+
+<<label=thallerfig_close>>=
+null <- dev.off()
+@ 
+
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{thallerfig.png}
+    \caption{Visualization of~$\wp(z;1+i,2-3i)$\label{P.view} using the
+      scheme of \cite{thaller1998}: white corresponds to a pole, black
+      to a zero, and full saturation to~$|\wp(z)|=1$.  The poles
+      of~$\wp(z)$ occur on a regular lattice, and the zeros on two
+      shifted lattices.  Note how each of the poles is surrounded by
+      two cycles of hue, indicating that they are of second order; and
+      each of the zeros is surrounded by one cycle of hue, indicating
+      that they are simple roots}
+  \end{center}
+\end{figure}
+
+The~$\sigma$ function with the same invariants is visualized in
+figure~\ref{sigma.green}, showing that its zeros lie on the same
+lattice as figure~\ref{latplot}.
+
+<<sigma_green_calc,cache=TRUE,echo=FALSE,print=FALSE>>=
+x <- seq(from= -12, to=12, len=n)
+y <- x
+z <- outer(x, 1i*y, "+")
+f <- sigma(z, c(1+1i,2-3i))
+@ 
+
+<<sigma_green_file>>=
+png("sigma_green.png",width=800,height=800)
+@ 
+
+<<sigma_green_plot>>=
+par(pty="s")
+view(x,y,f,scheme=4,real.contour=FALSE,drawlabels=FALSE,axes=FALSE, xlab="Re(z)",ylab="Im(z)")
+axis(1,pos= -12)
+axis(2,pos= -12)
+lines(x=c(-12,12),y=c(12,12))
+lines(y=c(-12,12),x=c(12,12))
+lines(x=c(-12,12),y=-c(12,12))
+lines(y=c(-12,12),x=-c(12,12))
+@ 
+
+
+<<sigma_green_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{sigma_green.png}
+    \caption{Visualization of~$f=\sigma(z;1+i,2-3i)$ using
+    \code{view()}; colour indicates~${\rm Arg}(f)$.  Thus points at
+    which~$f(z)$ is on the negative real axis, that is
+    $\{z:f(z)\in\mathbbm{R}^-\}$, are visible as discontinuities of
+    (colourimetric) value.  These discontinuities are semi-infinite;
+    note that the zeros of~$f$ occur, \label{sigma.green} at the
+    (finite) end of each line, on a regular lattice.  As~$|z|$
+    increases, each discontinuity threads its way through an
+    increasing number of other discontinuities and zeros, and the
+    spacing between the discontinuities becomes less and less}
+  \end{center}
+\end{figure}
+
+Figure~\ref{zeta.thaller} shows the zeta function, and
+figure~\ref{sn.thaller} shows Jacobi's ``sn'' function.
+
+<<calculate_zeta,echo=FALSE,print=FALSE,cache=TRUE>>=
+zeta.z <- zeta(z, c(1+1i,2-3i))
+@ 
+
+<<zetafig_file>>=
+png("zetafig.png",width=800,height=800)
+@ 
+
+<<label=zetafig_plot>>=
+par(pty="s")
+view(x,y,zeta.z,scheme=0,real.contour=FALSE,drawlabels=FALSE,xlab="Re(z)",ylab="Im(z)")
+@ 
+
+<<label=zetafig_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{zetafig.png}
+    \caption{Visualization of~$\zeta(z;1+i,2-3i)$ using \code{view()}
+    and the colouring scheme of Thaller.  Poles appear as white
+    regions, and zeros as black regions. \label{zeta.thaller} Each
+    pole is simple (first order), and each zero is a simple root (one
+    cycle of hue).  The poles occur on a lattice; there is no simple
+    pattern to the zeros.  Note the overall tendency towards whiteness
+    near the edges of the square: $|f|$ increases with~$|z|$, as per
+    equation~\ref{zeta.definition}}
+  \end{center}
+\end{figure}
+
+<<calculate_sn,echo=FALSE,print=FALSE,cache=TRUE>>=
+jj <- seq(from=-40,to=40,len=n)
+m <- outer(jj,1i*jj,"+")
+f <- sn(u=5-2i,m=m,maxiter=1000)
+@ 
+
+<<sn_figure_file>>=
+png("sn_figure.png",width=800,height=800)
+@ 
+
+<<sn_figure_plot>>=
+par(pty="s")
+ view(jj,jj,f,scheme=0,r0=1/5,real=T,imag=F,levels=c(0,-0.4,-1),drawlabels=F,xlab="Re(m)",ylab="Im(m)")
+@ 
+
+<<sn_figure_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{sn_figure.png}
+    \caption{Jacobi's ``sn'' function\label{sn.thaller} using the elliptic
+      package.  Here, $f={\rm sn}(5-2i,m)$ is visualized, with background
+      utilizing Thaller's scheme, and contours of equal~${\rm Re}(f)$ at
+      three selected values shown as black lines.  Note the aperiodic
+      arrangement of poles (white areas) and zeros (black areas)}
+  \end{center}
+\end{figure}
+
+
+\subsection{Potential flow}
+
+One application of complex analysis is to fluid dynamics.  In
+particular, potential flow (steady, two-dimensional, inviscid,
+incompressible) may be studied with the aid of analytic complex
+functions.  Here I show how the elliptic functions discussed in this
+paper may be used to simulate potential flow in a rectangular domain.
+
+Although the tenets of potential flow appear to be absurdly
+idealized\footnote{\cite{feynman1966} famously described potential flow
+as the study of ``dry water''}, it is nevertheless a useful technique
+in many branches of practical fluid mechanics: it is often used to
+calculate a ``theoretical'' flowfield with which measured velocities
+may be compared.  A short sketch of potential theory is given here but
+the reader is referred to~\cite{milne1949} for a full exposition.
+Briefly, we define a {\em complex potential} $w(z)$ to be a complex
+function
+\[
+w(z)=\phi+i\psi\] and observe that both~$\phi$ and~$\psi$ obey
+Laplace's equation if~$w$ is differentiable.  Given this, we may take
+the velocity vector~$\mathbf{v}=(v_x,v_y)$ of the fluid to be
+\[
+v_x=\frac{\partial\phi}{\partial x},\qquad
+v_y=\frac{\partial\phi}{\partial y}
+\]
+and observe that streamlines are given by contours of equal~$\psi$;
+contours of equal~$\phi$ are called equipotential lines.  The two
+systems of lines cross at right angles (this follows from the
+Cauchy-Riemann conditions).
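+
+For example, the following minimal sketch (base \proglang{R} only; it
+is not one of the code chunks used to build the figures, and the
+potential chosen is arbitrary) recovers approximate velocity
+components by finite differencing~$\phi={\rm Re}(w)$, and draws the
+streamlines as contours of~$\psi={\rm Im}(w)$:
+
+\begin{Schunk}
+\begin{Sinput}
+w <- function(z){z^2}              # an example complex potential
+x <- seq(from=-2, to=2, len=40)
+y <- seq(from=-2, to=2, len=40)
+z <- outer(x, 1i*y, "+")           # grid of points x + iy
+phi <- Re(w(z))                    # velocity potential
+psi <- Im(w(z))                    # stream function
+h <- x[2] - x[1]                   # grid spacing
+vx <- (phi[-1,] - phi[-nrow(phi),])/h   # v_x = d(phi)/dx, approximately
+vy <- (phi[,-1] - phi[,-ncol(phi)])/h   # v_y = d(phi)/dy, approximately
+contour(x, y, psi)                 # streamlines: contours of equal psi
+\end{Sinput}
+\end{Schunk}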
+
+Consider, for example, the function~$w(z)=z^2$, whose associated flow
+field is shown in figure~\ref{z.squared.pot.flow}.  This corresponds
+to a stagnation point, at which the speed vanishes; the streamlines
+(solid) intersect the equipotential lines (dotted) at right angles.
+
+<<stag_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+     f <- function(z){1i*z^2}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+@ 
+
+
+<<stag_point_file>>=
+png("stag_point.png",width=800,height=800)
+@ 
+
+<<stag_point_plot>>=
+par(pty="s")
+view(x,y,f(z),nlevels=14,imag.contour=TRUE,real.cont=TRUE,scheme=-1, 
+     drawlabels=FALSE,
+     axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+d1 <- c(-0.1,0,0.1)
+d2 <- c( 0.1,0,0.1)
+lines(x=d1,y=1+d2)
+lines(x=d1,y=-1-d2)
+lines(x=1-d2,y=d1)
+lines(x=-1+d2,y=d1)
+@ 
+
+<<stag_point_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{stag_point.png}
+    \caption{Potential flow on the complex plane:
+    field\label{z.squared.pot.flow} corresponding to the
+    function~$w(z)=z^2$.  Solid lines represent streamlines and dotted
+    lines represent equipotentials; these intersect at right angles.
+    Note the stagnation point at the origin}
+  \end{center}
+\end{figure}
+
+Now consider a slightly more complicated case.  A point source of
+strength~$m$ at~$z_0$ may be represented by the function
+\[m\log(z-z_0)\] (a sink corresponds to~$m<0$).  Any finite number
+of sources or sinks may be combined, as in~$\sum_i m_i\log(z-z_i)$
+where the~$i^{\rm th}$ source is at~$z_i$ and has strength~$m_i$,
+because the system is linear\footnote{It is often more convenient to
+work with the algebraically equivalent form~$\log\left(\prod
+(z-z_i)^{m_i}\right)$, as there are fewer branch cuts to deal with.}.
+Figure~\ref{upper.halfplane.flow} shows two sources and two sinks, all
+of equal strength.  Because the flowfield is symmetric with respect to
+the real axis, there is no flux across it; we may therefore ignore the
+flow in the lower half plane (i.e.~$\{z:{\rm Im}(z)<0\}$) and consider
+the flow to be bounded below by the real axis.  This is known as {\em
+the method of images}~\citep{milne1949}.
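+
+The following sketch (the helper \code{images.potential()}, and the
+particular positions and strengths, are illustrative only and are not
+part of the \pkg{elliptic} package) builds such a potential directly
+from the product form of the footnote, adding a conjugate image for
+each singularity so that the real axis is a streamline:
+
+\begin{Schunk}
+\begin{Sinput}
+images.potential <- function(z, z.sing, strengths){
+  # log(prod_i (z-z_i)^m_i), with the conjugate image of each
+  # singularity included so that there is no flux across the real axis:
+  p <- 1
+  for(i in seq_along(z.sing)){
+    p <- p * (z - z.sing[i])^strengths[i] * (z - Conj(z.sing[i]))^strengths[i]
+  }
+  log(p)
+}
+x <- seq(from=-6, to=6, len=100)
+y <- seq(from= 0, to=6, len=100)
+z <- outer(x, 1i*y, "+")
+w <- images.potential(z, z.sing=c(-1+2i, 3+1i), strengths=c(1,-1))
+contour(x, y, Im(w), nlevels=30)   # streamlines in the upper half plane
+\end{Sinput}
+\end{Schunk}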
+
+
+<<two_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+     f <- function(z){1i*log((z-1.7+3i)*(z-1.7-3i)/(z+1-0.6i)/(z+1+0.6i))}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+@ 
+
+
+<<two_sources_two_sinks_file>>=
+png("two_sources_two_sinks.png",width=800,height=800)
+@ 
+<<label=two_sources_two_sinks_plot>>=
+par(pty="s")
+view(x,y,f(z),nlevels=24,imag.contour=TRUE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+@ 
+
+<<two_sources_two_sinks_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{two_sources_two_sinks.png}
+    \caption{Potential flow on the complex plane: two sources and
+    two sinks, all of equal strength.  Solid
+    \label{upper.halfplane.flow} lines denote streamlines, dotted
+    lines equipotentials; colour scheme uses the \code{hcl()} system:
+    hue varies with the argument, and chrominance varies with the
+    modulus, of the potential.  There is no flux between the lower and
+    the upper half plane, but there is flux out of, and in to, the
+    diagram.  Note the stagnation point at approximately $5+0i$}
+  \end{center}
+\end{figure}
+
+Now, one may transform a potential flowfield into another form using a
+conformal mapping from the~$z$- plane to the~$\zeta$- plane,
+traditionally denoted
+\[
+\zeta=f(z).
+\]
+
+This technique finds application when flow is desired (in the~$\zeta$-
+plane) that obeys some specific boundary condition that is simple to
+specify in the~$z$- plane. 
+
+%In the present case, we make use of the Schwartz-Christoffel theorem,
+%which states that if~$a,b,c,\ldots$ are~$n$ points on the real axis of
+%the~$\zeta$- plane with~$a<b<c<\ldots$,
+%and~$\alpha,\beta,\gamma,\ldots$ the interior angles of a simple
+%closed polygon of~$n$ vertices, then 
+%\begin{equation}
+%\frac{dz}{d\zeta}=K
+%\left(\zeta-a\right)^{\alpha/\pi-1}
+%\left(\zeta-b\right)^{\beta/\pi-1}
+%\left(\zeta-c\right)^{\gamma/\pi-1}
+%\ldots
+%\end{equation}
+%transforms the real axis of the~$\zeta$- plane into the boundary of a
+%closed polygon in the~$z$- plane with interior
+%angles~$\alpha,\beta,\ldots$.  If the polygon is simple, then the
+%upper half of the~$\zeta$- plane maps to the interior of the polygon.
+%
+%Here the Schwartz Christoffel theorem~\cite{milne1949} is applied to a
+%rectangle, in which~$\alpha=\beta=\gamma=\delta=\pi/2$.  With suitably
+%chosen~$a,b,c,d$ we see that the map from the upper half plane of
+%the~$\zeta$- plane to a rectangle in the~$z$- plane is given by
+
+In this case, we seek a conformal transformation that maps the upper
+half plane to a rectangle.  If we consider the flowfield shown in
+figure~\ref{upper.halfplane.flow}, then the map given by
+\[
+ \zeta=\int\frac{dz}{\sqrt{(1-a^2z^2)(1-z^2)}}
+\]
+takes the upper half of the~$z$- plane to a rectangle in
+the~$\zeta$- plane~\citep{milne1949}.  Using
+equation~\ref{sn.definition}, this is equivalent to~$z={\rm
+sn}(\zeta;m)$, where~${\rm sn}(\cdot,\cdot)$ is a Jacobian elliptic
+function and~$m=a^2$ the parameter.
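+
+The correspondence between the integral and~${\rm sn}(\cdot,\cdot)$ is
+easily checked numerically on the real axis.  The following sketch
+assumes only \code{sn()} from the \pkg{elliptic} package and
+\proglang{R}'s \code{integrate()}; the values of \code{m} and \code{x}
+are arbitrary:
+
+\begin{Schunk}
+\begin{Sinput}
+m <- 0.5     # the parameter (m = a^2 in the integral above)
+x <- 0.7     # a point on the real axis with 0 < x < 1
+u <- integrate(function(t){1/sqrt((1-t^2)*(1-m*t^2))}, lower=0, upper=x)$value
+abs(sn(u, m=m) - x)    # should be small
+\end{Sinput}
+\end{Schunk}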
+
+Figure~\ref{box.flow} shows the resulting flow field: observe how the
+flow speed, which is inversely proportional to the spacing between the
+streamlines, is very small near the left-hand edge of the rectangle.
+
+
+<<rect_calc3,echo=FALSE,print=FALSE,cache=TRUE>>=
+m <- 0.5
+K <- K.fun(m)
+iK <- K.fun(1-m)
+
+#b <- sn(1.8 + 0.8i, m=m) # 1.8 to the right and 0.8 up.
+#b <- 0 # middle bottom
+b <- sn(0 + 1i*iK/2,m=m)  #dead centre of the rectangle.
+#b <- -1 # lower left
+#b <- 1/sqrt(m) # top right
+#b <- -1/sqrt(m) # top left
+#b <- 1e9*1i # top centre
+
+
+a <- 1   #bottom right hand side corner
+
+
+f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+
+     x <- seq(from=-K,to=K,len=n)
+     y <- seq(from=0,to=iK,len=n)
+     z <- outer(x,1i*y,"+")
+    fsn <- f(sn(z,m=m))
+@ 
+
+
+<<rectangle_pot_flow_file>>=
+png("rectangle_pot_flow.png",width=800,height=800)
+@ 
+
+<<rectangle_pot_flow_plot>>=
+view(x,y,fsn,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<rectangle_pot_flow_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{rectangle_pot_flow.png}
+    \caption{Potential flow in a rectangle of aspect ratio~2: source
+    and sink of equal \label{box.flow} strength.  Colour scheme as in
+    figure~\ref{upper.halfplane.flow}.  Note the dividing streamline
+    which terminates in a stagnation point on the rectangle boundary}
+  \end{center}
+\end{figure}
+
+\subsection{Bayesian analysis of potential flow}
+
+When considering potential flows, it is often necessary to infer the
+locations of singularities in the flow from sparse and imperfect
+data~\citep{johnson2004}.
+
+Here, I apply the methods of~\cite{kennedy2001}
+and~\cite{kennedy2001a} (hereafter KOH and KOHa respectively) using
+the~\pkg{BACCO} package~\citep{hankin2005} to assess some
+characteristics of potential flow in a rectangle.
+
+Kennedy and O'Hagan considered the following inference problem for a
+set of parameters~$\theta\in{\mathcal R}^q$ that are inputs to a
+computer program.  Given an independent variable~$x\in{\mathcal R}^n$,
+and a set of scalar (``field'') observations~$z=z(x)$, they assume
+
+\begin{equation}
+z(x)=\rho\cdot\eta\left(x,\theta\right)+
+\delta(x)+\epsilon
+\end{equation}
+where~$\rho$ is a constant of proportionality (notionally unity);
+$\eta(\cdot,\cdot)$ a Gaussian process with unknown coefficients;
+$\theta$ the true, but unknown parameter values; $\delta(\cdot)$ a
+model inadequacy term (also a Gaussian process with unknown
+coefficients); and~$\epsilon\sim N(0,\lambda^2)$ uncorrelated normal
+observational errors.
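+
+To fix ideas, the following sketch simulates a one-dimensional version
+of this model (it is illustrative only: the helper \code{rgp()}, the
+squared-exponential covariances, and the numerical values are
+arbitrary choices, not those used by \pkg{BACCO}):
+
+\begin{Schunk}
+\begin{Sinput}
+set.seed(1)
+rgp <- function(x, scale=1, sigma2=1){
+  # one draw from a zero-mean Gaussian process on the points x,
+  # via the Cholesky factor of a squared-exponential covariance matrix:
+  K <- sigma2*exp(-outer(x, x, "-")^2/(2*scale^2))
+  as.vector(t(chol(K + 1e-8*diag(length(x)))) %*% rnorm(length(x)))
+}
+x      <- seq(from=0, to=1, len=50)
+theta  <- 0.3                              # "true" parameter value
+eta    <- rgp(x + theta, scale=0.3)        # stands in for eta(x, theta)
+delta  <- rgp(x, scale=0.1, sigma2=0.05)   # model inadequacy term
+rho    <- 1
+lambda <- 0.05
+z.obs  <- rho*eta + delta + rnorm(length(x), sd=lambda)
+\end{Sinput}
+\end{Schunk}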
+
+Inferences about~$\eta(\cdot,\cdot)$ are made from point observations
+of the process: Kennedy and O'Hagan call these the ``code
+observations'' on the grounds that their chief motivation was the
+understanding of complex computer codes.
+
+Here, potential flow in a rectangle is considered.  The source is at
+one corner of the rectangle, which is considered to have lower left
+point~$(-1,0)$ and upper right point~$(1,1)$.  The position of the
+sink is unknown.
+
+I now show how the position of the sink may be inferred from a sparse
+and noisy set of observed fluid speeds.  Similar inference problems
+are encountered in practice when considering oceanographic flows such
+as those occurring near deep sea vents, although the geometry is
+generally more complex than considered here.
+
+The independent variable~$\mathbf{x}$ is the two-dimensional position
+within the rectangle, and the field observation~$z(\mathbf{x})$ is the
+fluid speed at that point, plus observational error~$\epsilon$.  The
+parameter set~$\theta$ thus has two degrees of freedom corresponding
+to the $x-$ and $y-$ coordinates of the sink.
+
+Field observations will be obtained numerically, using the
+\pkg{elliptic} package.  The simulated flowfield has a sink at a {\em
+known} position---in this case the geometric centre of the
+rectangle---and Bayesian methods will be used to infer its position
+using only fluid speed data.
+
+In the terminology of KOHa, dataset~\code{y} corresponds to modelled
+fluid speed, obtained from the appropriate simulations carried out
+with the sink placed at different locations within the rectangle.
+Dataset~\code{z} corresponds to field observations, which in this
+case are fluid speeds at several points in the rectangle, obtained from
+simulations with the sink at the centre of the rectangle.
+
+<<bacco_flow,echo=FALSE,print=FALSE,cache=TRUE>>=
+# Choose the size of the computational mesh:
+n <- n_BACCO
+
+# Choose the number of code observations for D1:
+n.code.obs <- 30
+
+# And the number of field observations for D2:
+n.field.obs <- 31
+
+# First, set up the D1 design matrix.  Recall that D1 is the set of code
+# observations, which here means the observations of fluid speed when
+# the sink is at a known, specified, position.
+
+set.seed(0)
+
+latin.hypercube <- function (n, d){
+  sapply(seq_len(d), function(...) { (sample(1:n) - 0.5)/n })
+}
+
+
+D1.elliptic <- latin.hypercube(n.code.obs , 4)
+colnames(D1.elliptic) <- c("x","y","x.sink","y.sink")
+D1.elliptic[,c(1,3)] <- (D1.elliptic[,c(1,3)] -0.5)*2
+#D1.elliptic[,c(2,4)] <- D1.elliptic[,c(2,4)] *iK
+
+# now a D2 design matrix.  This is field observations: observations of
+# fluid speed when the sink is at the true, unknown, specified,
+# position.
+D2.elliptic <- latin.hypercube(n.field.obs , 2)
+colnames(D2.elliptic) <- c("x","y")
+D2.elliptic[,1] <- (D2.elliptic[,1] -0.5)*2
+
+
+# Now a function that, given x and y and x.sink and y.sink, returns
+# the log of the fluid speed at x,y:
+
+fluid.speed <- function(x.pos, y.pos, x.sink, y.sink){
+  
+  a <- 1   #bottom right hand side corner
+  b <- sn(x.pos/K + 1i*iK*y.pos,m=m)  #position (x.pos , y.pos)
+  f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+  
+  x <- seq(from=-K,to=K,len=n)
+  y <- seq(from=0,to=iK,len=n)
+  z <- outer(x,1i*y,"+")
+  potential <- f(sn(z,m=m))
+  
+  get.log.ke <- function(x,y,potential){
+    jj <- Re(potential)
+    jj.x <- cbind(jj[,-1]-jj[,-ncol(jj)],0)
+    jj.y <- rbind(jj[-1,]-jj[-nrow(jj),],0)
+    kinetic.energy <- jj.x^2 + jj.y^2
+    n.x <- round(n * (x-(-1))/2)
+    n.y <- round(n * y)
+    return(log(kinetic.energy[n.x , n.y]+0.01))
+  }
+
+  return(get.log.ke(x.pos,y.pos,potential))
+}
+
+# now fill in code outputs y:
+y.elliptic <- rep(NA,nrow(D1.elliptic))
+for(i in 1:length(y.elliptic)){
+  jj <- D1.elliptic[i,,drop=TRUE]
+  y.elliptic[i] <- fluid.speed(jj[1],jj[2],jj[3],jj[4])
+}
+
+
+# Now do the field observations; here the sink is known to be at the
+# centre of the rectangle:
+
+z.elliptic <- rep(NA,nrow(D2.elliptic))
+for(i in 1:length(z.elliptic)){
+  jj <- D2.elliptic[i,,drop=TRUE]
+  z.elliptic[i] <- fluid.speed(jj[1],jj[2],0,0.5)
+}
+
+# Create design matrix plus observations for didactic purposes:
+D1 <- round(cbind(D1.elliptic,observation=y.elliptic),2)
+D2 <- round(cbind(D2.elliptic,observation=z.elliptic),2)
+
+
+# create a data vector:
+d.elliptic <- c(y.elliptic , z.elliptic)
+
+#now a h1.toy() equivalent:
+h1.elliptic <- function(x){
+  out <- c(1,x[1])
+}
+
+#now a H1.toy() equivalent:
+ H1.elliptic <- 
+function (D1) 
+{
+    if (is.vector(D1)) {
+        D1 <- t(D1)
+    }
+    out <- t(apply(D1, 1, h1.elliptic))
+    colnames(out)[1] <- "h1.const"
+    return(out)
+}
+                      
+h2.elliptic <-
+  function(x){
+    c(1,x[1]) 
+  }
+
+H2.elliptic <-
+  function(D2){
+    if (is.vector(D2)) {
+      D2 <- t(D2)
+    }
+    out <- t(apply(D2, 1, h2.elliptic))
+    colnames(out)[1] <- "h2.const"
+    return(out)
+  }
+
+
+#Now an extractor function:
+extractor.elliptic <- 
+function (D1) 
+{
+    return(list(x.star = D1[, 1:2, drop = FALSE], t.vec = D1[, 
+        3:4, drop = FALSE]))
+}
+
+# Now a whole bunch of stuff to define a phi.fun.elliptic()
+# and, after that, to call it:
+phi.fun.elliptic <- 
+function (rho, lambda, psi1, psi1.apriori, psi2, psi2.apriori, 
+    theta.apriori, power) 
+{
+    "pdm.maker.psi1" <- function(psi1) {
+        jj.omega_x <- diag(psi1[1:2])
+        rownames(jj.omega_x) <- names(psi1[1:2])
+        colnames(jj.omega_x) <- names(psi1[1:2])
+        jj.omega_t <- diag(psi1[3:4])
+        rownames(jj.omega_t) <- names(psi1[3:4])
+        colnames(jj.omega_t) <- names(psi1[3:4])
+        sigma1squared <- psi1[5]
+        return(list(omega_x = jj.omega_x, omega_t = jj.omega_t, 
+            sigma1squared = sigma1squared))
+    }
+    "pdm.maker.psi2" <- function(psi1) {
+        jj.omegastar_x <- diag(psi2[1:2])
+        sigma2squared <- psi2[3]
+        return(list(omegastar_x = jj.omegastar_x, sigma2squared = sigma2squared))
+    }
+    jj.mean <- theta.apriori$mean
+    jj.V_theta <- theta.apriori$sigma
+    jj.discard.psi1 <- pdm.maker.psi1(psi1)
+    jj.omega_t <- jj.discard.psi1$omega_t
+    jj.omega_x <- jj.discard.psi1$omega_x
+    jj.sigma1squared <- jj.discard.psi1$sigma1squared
+    jj.discard.psi2 <- pdm.maker.psi2(psi2)
+    jj.omegastar_x <- jj.discard.psi2$omegastar_x
+    jj.sigma2squared <- jj.discard.psi2$sigma2squared
+    jj.omega_t.upper <- chol(jj.omega_t)
+    jj.omega_t.lower <- t(jj.omega_t.upper)
+    jj.omega_x.upper <- chol(jj.omega_x)
+    jj.omega_x.lower <- t(jj.omega_x.upper)
+    jj.a <- solve(solve(jj.V_theta) + 2 * jj.omega_t, solve(jj.V_theta, 
+        jj.mean))
+    jj.b <- t(2 * solve(solve(jj.V_theta) + 2 * jj.omega_t) %*% 
+        jj.omega_t)
+    jj.c <- jj.sigma1squared/sqrt(det(diag(nrow = nrow(jj.V_theta)) + 
+        2 * jj.V_theta %*% jj.omega_t))
+    jj.A <- solve(jj.V_theta + solve(jj.omega_t)/4)
+    jj.A.upper <- chol(jj.A)
+    jj.A.lower <- t(jj.A.upper)
+    list(rho = rho, lambda = lambda, psi1 = psi1, psi1.apriori = psi1.apriori, 
+        psi2 = psi2, psi2.apriori = psi2.apriori, theta.apriori = theta.apriori, 
+        power = power, omega_x = jj.omega_x, omega_t = jj.omega_t, 
+        omegastar_x = jj.omegastar_x, sigma1squared = jj.sigma1squared, 
+        sigma2squared = jj.sigma2squared, omega_x.upper = jj.omega_x.upper, 
+        omega_x.lower = jj.omega_x.lower, omega_t.upper = jj.omega_t.upper, 
+        omega_t.lower = jj.omega_t.lower, a = jj.a, b = jj.b, 
+        c = jj.c, A = jj.A, A.upper = jj.A.upper, A.lower = jj.A.lower)
+}
+
+# OK, that's the function defined.  Now to create some jj.* variables
+# to call it:
+
+jj.psi1 <- c(rep(1,4),0.3)
+names(jj.psi1)[1:4] <- colnames(D1.elliptic)
+names(jj.psi1)[5] <- "sigma1squared"
+
+jj.mean.psi1 <- rep(1,5)
+names(jj.mean.psi1) <- names(jj.psi1)
+
+jj.sigma.psi1 <- diag(0.1,nrow=5)
+rownames(jj.sigma.psi1) <- names(jj.psi1)
+colnames(jj.sigma.psi1) <- names(jj.psi1)
+
+jj.psi2 <- c(1,1,0.3)
+names(jj.psi2)[1:2] <- colnames(D2.elliptic)
+names(jj.psi2)[3] <- "sigma2squared"
+
+jj.mean.psi2 <- rep(1,4)
+names(jj.mean.psi2) <- c("x.sink", "y.sink","rho","lambda")
+
+jj.sigma.psi2 <- diag(0.1,4)
+rownames(jj.sigma.psi2) <- names(jj.mean.psi2)
+colnames(jj.sigma.psi2) <- names(jj.mean.psi2)
+
+jj.mean.th <- c(1,0.5)
+names(jj.mean.th) <- colnames(D1.elliptic)[3:4]
+
+jj.sigma.th <- diag(rep(1,2))
+rownames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+colnames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+
+# Now call phi.fun.elliptic():
+phi.elliptic <-
+  phi.fun.elliptic(
+                   rho=1,
+                   lambda=0.1,
+                   psi1=jj.psi1,
+                   psi2=jj.psi2,
+                   psi1.apriori=list(mean=jj.mean.psi1, sigma=jj.sigma.psi1),
+                   psi2.apriori=list(mean=jj.mean.psi2, sigma=jj.sigma.psi2),
+                   theta.apriori=list(mean=jj.mean.th, sigma=jj.sigma.th),
+                   power=2
+                   )
+
+# Now an E.theta.elliptic():
+E.theta.elliptic <- 
+function (D2 = NULL, H1 = NULL, x1 = NULL, x2 = NULL, phi, give.mean = TRUE) 
+{
+    if (give.mean) {
+        m_theta <- phi$theta.apriori$mean
+        return(H1(D1.fun(D2, t.vec = m_theta)))
+    }
+    else {
+        out <- matrix(0, 2,2)
+        rownames(out) <- c("h1.const","x")
+        colnames(out) <- c("h1.const","x")
+        return(out)
+    }
+}
+
+#Now an Edash.theta.elliptic().  Because the basis vector is not a
+#function of theta, this is a bit academic as we can use a function
+#that is identical to Edash.theta.toy():
+
+Edash.theta.elliptic <-
+function (x, t.vec, k, H1, fast.but.opaque = FALSE, a = NULL, 
+    b = NULL, phi = NULL) 
+{
+    if (fast.but.opaque) {
+        edash.mean <- a + crossprod(b, t.vec[k, ])
+    }
+    else {
+        V_theta <- phi$theta.apriori$sigma
+        m_theta <- phi$theta.apriori$mean
+        omega_t <- phi$omega_t
+        edash.mean <- solve(solve(V_theta) + 2 * omega_t, solve(V_theta, 
+            m_theta) + 2 * crossprod(omega_t, t.vec[k, ]))
+    }
+    jj <- as.vector(edash.mean)
+    names(jj) <- rownames(edash.mean)
+    edash.mean <- jj
+    return(H1(D1.fun(x, edash.mean)))
+}
+
+
+
+# Define a wrapper for equation 8:
+# First, calculate the constant to subtract to ensure that
+# the support has a maximum of about zero: 
+
+maximum.likelihood.support <- p.eqn8.supp(theta=c(0,1/2), D1=D1.elliptic, D2=D2.elliptic, H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic, include.prior=FALSE, lognormally.distributed=FALSE, return.log=TRUE, phi=phi.elliptic)
+
+support <- function(x){
+p.eqn8.supp(theta=x, D1=D1.elliptic, D2=D2.elliptic, H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic, include.prior=FALSE, lognormally.distributed=FALSE, return.log=TRUE, phi=phi.elliptic) - maximum.likelihood.support
+}
+
+# Define a local function called optim() for aesthetic reasons (i.e. it
+# improves the appearance of the call to optim()):
+
+optim <- 
+  function(par,fn){
+    stats::optim(par=par,fn=fn,control=list(fnscale = -1))$par
+  }
+
+@ 
+
+The code evaluation design matrix~\code{D1} is chosen according to a
+random Latin hypercube design, and the observations are calculated using
+the \pkg{elliptic} package:
+
+<<head_D1,echo=TRUE,print=TRUE>>=
+head(D1)
+@ 
+
+So the first line shows a simulation with the sink
+at~(\Sexpr{D1[1,3]},\Sexpr{D1[1,4]}); the log of the fluid speed
+at~(\Sexpr{D1[1,1]}, \Sexpr{D1[1,2]}) is~\Sexpr{D1[1,5]}.  There are a
+total of~\Sexpr{n.code.obs} such observations.  Figure~\ref{code.obs}
+shows the first of these observation points superimposed on the
+corresponding simulated flow field.
+
+The field observations are similarly determined:
+<<head_D2,echo=TRUE,print=TRUE>>=
+head(D2)
+@ 
+
+showing that the first field observation, at~(\Sexpr{D2[1,1]},
+\Sexpr{D2[1,2]}), is~\Sexpr{D2[1,3]}.  There are a total
+of~\Sexpr{n.field.obs} such observations.  Figure~\ref{field.obs}
+shows the field observation points in the context of the ``true''
+flow field.
+
+<<calc_b_sn,echo=FALSE,print=FALSE,cache=TRUE>>=
+b <- sn(D1[1,3] + 1i*D1[1,4],m=m) #point corresponding to first line of D1
+fsnz2 <- f(sn(z,m=m))
+@ 
+
+<<code_obs_file>>=
+png("code_obs.png",width=800,height=800)
+@ 
+
+<<code_obs_plot>>=
+view(x,y,fsnz2,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D1[1,1],y=D1[1,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<code_obs_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{code_obs.png}
+    \caption{Streamlines\label{code.obs} of the flow corresponding to
+      the first code observation; the observation point is shown as a cross.
+      The sink is at~(\Sexpr{D1[1,3]},\Sexpr{D1[1,4]})}
+  \end{center}
+\end{figure}
+
+<<calc_b2,echo=FALSE,print=FALSE,cache=TRUE>>=
+b <- sn(0 + 1i*iK/2,m=m) 
+fsnzz <- f(sn(z,m=m))
+@ 
+
+
+<<true_flow_file>>=
+png("true_flow.png",width=800,height=800)
+@ 
+
+<<true_flow_plot>>=
+view(x,y,fsnzz,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D2[,1],y=D2[,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<true_flow_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{true_flow.png}
+    \caption{Streamlines\label{field.obs}
+      of ``true'' flow; field observation points shown as crosses}
+  \end{center}
+\end{figure}
+
+Kennedy and O'Hagan give, {\em inter alia,} an expression for the
+likelihood of any value of $\theta$ being the true parameter set (in
+this case, the true position of the sink) in terms of the code
+evaluations and field observations.
+
+Here, function \code{support()} calculates the log-likelihood for a
+pair of coordinates of the sink.  This may be evaluated at the centre
+of the rectangle, and again at the top left corner:
+
+
+<<support,echo=TRUE,print=TRUE>>=
+support(c(0,1/2)) #centre of the rectangle
+support(c(-1,1))   #top left corner
+@ 
+
+showing, as expected, that the support is very much larger at the
+centre of the rectangle than at the corner (here the arbitrary additive
+constant is such that the support at \code{c(0,1/2)} is exactly zero).
+It is now possible to identify the position of the sink that
+corresponds to maximum support using numerical optimization
+techniques:
+
+
+<<mle_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+mle <- optim(c(0,1/2),support)
+@ 
+
+\begin{Schunk}
+\begin{Sinput}
+(mle <- optim(c(0,1/2),support))
+\end{Sinput}
+\end{Schunk}
+
+<<print_mle,echo=FALSE,print=TRUE>>=
+mle
+@ 
+
+Thus the maximum likelihood estimate for the sink is a distance of
+about~0.2 from the true position.  The support at this point is
+about~3.9 units of likelihood:
+
+<<support_of_mle,echo=TRUE,print=TRUE>>=
+support(mle)
+@ 
+
+\subsubsection{Discussion of Bayesian statistical analysis}
+
+The above example shows the ideas of KOH being applied
+straightforwardly, but with the novel twist of $\theta$ being
+interpreted as physical characteristics of a fluid flow.  In this
+case~$\theta$ comprises the coordinates of the sink.
+
+The MLE is better supported than the true position by about~3.9 units
+of likelihood: thus, in the language of~\cite{edwards1992}, the
+hypothesis of $\theta_\mathrm{true}=(0,0.5)$ would not be rejected if
+one accepted Edwards's 2 units of likelihood per degree of freedom.
+
+The discrepancy between~$\hat{\theta}$ and~$\theta_\mathrm{true}$ (a
+distance of about 0.2) may be due to the coarseness of the form
+adopted for the basis functions, and better results might be obtained
+by using a more sophisticated system of model inadequacy than the
+simple linear form presented here.
+
+The methods of KOH allow one to make statistically robust statements
+about the physical characteristics of an interesting flow that are
+difficult to make in any other way.
+
+\section{Conclusions}
+
+Elliptic functions are an interesting and instructive branch of
+complex analysis, and are frequently encountered in applied
+mathematics: here they were used to calculate a potential flow field
+in a rectangle.
+
+This paper introduced the \proglang{R} package \pkg{elliptic}, which was then
+used in conjunction with Bayesian statistical methods (the \pkg{BACCO}
+bundle) to make statistically sound inferences about a flow with
+uncertain parameters: in this case the position of the sink was
+estimated from a sparse and noisy dataset.
+
+
+\subsection*{Acknowledgements}
+I would like to acknowledge the many stimulating and helpful comments
+made by the \proglang{R}-help list over the years.
+
+\bibliography{elliptic}
+\end{document}
diff --git a/inst/doc/ellipticpaper.pdf b/inst/doc/ellipticpaper.pdf
new file mode 100644
index 0000000..c3980e2
Binary files /dev/null and b/inst/doc/ellipticpaper.pdf differ
diff --git a/inst/doc/residuetheorem.R b/inst/doc/residuetheorem.R
new file mode 100644
index 0000000..84446a2
--- /dev/null
+++ b/inst/doc/residuetheorem.R
@@ -0,0 +1,70 @@
+### R code from vignette source 'residuetheorem.Rnw'
+
+###################################################
+### code chunk number 1: requirepackage
+###################################################
+require(elliptic,quietly=TRUE)
+
+
+###################################################
+### code chunk number 2: chooseR
+###################################################
+R <- 400
+
+
+###################################################
+### code chunk number 3: definesemi
+###################################################
+u1     <- function(x){R*exp(pi*1i*x)}
+u1dash <- function(x){R*pi*1i*exp(pi*1i*x)}
+
+
+###################################################
+### code chunk number 4: straightpart
+###################################################
+u2     <- function(x){R*(2*x-1)}
+u2dash <- function(x){R*2}
+
+
+###################################################
+### code chunk number 5: residuetheorem.Rnw:95-96
+###################################################
+f <- function(z){exp(1i*z)/(1+z^2)}
+
+
+###################################################
+### code chunk number 6: ansapp
+###################################################
+answer.approximate <-
+    integrate.contour(f,u1,u1dash) +
+    integrate.contour(f,u2,u2dash) 
+
+
+###################################################
+### code chunk number 7: compareans
+###################################################
+answer.exact <- pi/exp(1)
+abs(answer.approximate - answer.exact)
+
+
+###################################################
+### code chunk number 8: residuetheorem.Rnw:123-124
+###################################################
+abs(integrate.segments(f,c(-R,R,1i*R))- answer.exact)
+
+
+###################################################
+### code chunk number 9: useabigsquare
+###################################################
+abs(integrate.segments(f,c(-R,R,R+1i*R, -R+1i*R))- answer.exact)
+
+
+###################################################
+### code chunk number 10: residuetest
+###################################################
+f <- function(z){sin(z)}
+numerical <- residue(f,z0=1,r=1)
+exact <- sin(1)
+abs(numerical-exact)
+
+
diff --git a/inst/doc/residuetheorem.Rnw b/inst/doc/residuetheorem.Rnw
new file mode 100644
index 0000000..273748d
--- /dev/null
+++ b/inst/doc/residuetheorem.Rnw
@@ -0,0 +1,162 @@
+\documentclass[nojss]{jss}
+
+\usepackage{dsfont}
+\usepackage{bbm}
+\usepackage{amsfonts}
+\usepackage{wasysym}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%% just as usual
+\author{Robin K. S. Hankin}
+\title{The residue theorem from a numerical perspective}
+%\VignetteIndexEntry{The residue theorem from a numerical perspective}
+%% for pretty printing and a nice hypersummary also set:
+%% \Plainauthor{Achim Zeileis, Second Author} %% comma-separated
+\Plaintitle{The residue theorem from a numerical perspective}
+
+%% an abstract and keywords
+\Abstract{A short vignette illustrating Cauchy's integral theorem
+  using numerical integration}
+\Keywords{Residue theorem, Cauchy formula, Cauchy's integral formula,
+  contour integration, complex integration, Cauchy's theorem}
+
+
+\Address{
+  Robin K. S. Hankin\\
+  Auckland University of Technology\\
+  2-14 Wakefield Street\\
+  Auckland\\
+  New Zealand\\
+  E-mail: \email{hankin.robin at gmail.com}
+}
+
+
+%% need no \usepackage{Sweave.sty}
+\begin{document}
+
+
+<<requirepackage,echo=FALSE,print=FALSE>>=
+require(elliptic,quietly=TRUE)
+@ 
+
+
+In this very short vignette, I will use contour integration to evaluate
+\begin{equation}
+  \int_{x=-\infty}^{\infty}\frac{e^{ix}}{1+x^2}\,dx
+  \end{equation}
+using numerical methods.  This document is part of the \pkg{elliptic}
+package~\cite{hankin2006}. 
+
+The residue theorem tells us that the integral of~$f(z)$ along any
+closed nonintersecting path is equal to~$2\pi i$ times the sum of the
+residues inside it.
+
+Take a path~$P$ from $-R$ to $+R$ along the real axis, then follow a
+semicircle of radius~$R$ in the upper half plane to close the loop.
+Now consider large~$R$.  Then~$P$ encloses a pole at~$i$ [there is one
+at~$-i$ also, but this is outside~$P$, so irrelevant here] at which
+the residue is~$-i/(2e)$.  Thus
+
+\begin{equation}
+  \oint_P f(z)\,dz=2\pi i\cdot\frac{-i}{2e} = \pi/e
+\end{equation}
+  
+along~$P$; the contribution from the semicircle tends to zero
+as~$R\longrightarrow\infty$; thus the integral along the real axis is
+the whole path integral, or~$\pi/e$.
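+
+To see why the semicircular contribution vanishes, note that on the
+semicircle $z=Re^{i\theta}$, $0\le\theta\le\pi$, we have
+$\left|e^{iz}\right|=e^{-R\sin\theta}\le 1$ and
+$\left|1+z^2\right|\ge R^2-1$, so that
+\begin{equation}
+\left|\int_{\mbox{\scriptsize semicircle}} f(z)\,dz\right|
+\le\frac{\pi R}{R^2-1}\longrightarrow 0
+\qquad\mbox{as }R\longrightarrow\infty.
+\end{equation}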
+
+We can now reproduce this result numerically.  First, choose $R$:
+
+<<chooseR>>=
+R <- 400
+@ 
+
+
+And now define a path~$P$.  First, the semicircle:
+
+<<definesemi>>=
+u1     <- function(x){R*exp(pi*1i*x)}
+u1dash <- function(x){R*pi*1i*exp(pi*1i*x)}
+@ 
+
+and now the straight part along the real axis:
+
+<<straightpart>>=
+u2     <- function(x){R*(2*x-1)}
+u2dash <- function(x){R*2}
+@ 
+
+And define the function to be integrated:
+
+<<>>=
+f <- function(z){exp(1i*z)/(1+z^2)}
+@ 
+
+Now carry out the path integral.  I'll do it explicitly, but note that
+the contribution from the first integral should be small:
+
+
+<<ansapp>>=
+answer.approximate <-
+    integrate.contour(f,u1,u1dash) +
+    integrate.contour(f,u2,u2dash) 
+@ 
+
+
+And compare with the analytical value:
+
+<<compareans>>=
+answer.exact <- pi/exp(1)
+abs(answer.approximate - answer.exact)
+@ 
+
+Now try the same thing but integrating over a triangle instead of a
+semicircle, using {\tt integrate.segments()}.  Use a path $P'$ with
+base from $-R$ to $+R$ along the real axis, closed by two straight
+segments, one from $+R$ to $iR$, the other from $iR$ to $-R$:
+
+ 
+<<>>=
+abs(integrate.segments(f,c(-R,R,1i*R))- answer.exact)
+@  
+ 
+
+Observe how much better one can do by integrating over a big square
+instead:
+
+
+<<useabigsquare>>=
+abs(integrate.segments(f,c(-R,R,R+1i*R, -R+1i*R))- answer.exact)
+@ 
+
+
+\subsection{Residue theorem}
+
+
+Function \code{residue()} is a wrapper that takes a function~$f(z)$
+and integrates~$f(z)/\left(z-z_0\right)$ around a closed loop~$C$
+which encloses~$z_0$, returning the result divided by~$2\pi i$.  If
+$f(\cdot)$ is holomorphic within~$C$, Cauchy's integral formula states
+that
+\begin{equation}
+  \frac{1}{2\pi i}\oint_C\frac{f(z)}{z-z_0}\,dz = f(z_0)
+  \end{equation}
+
+
+
+and we can test this numerically:
+
+<<residuetest>>=
+f <- function(z){sin(z)}
+numerical <- residue(f,z0=1,r=1)
+exact <- sin(1)
+abs(numerical-exact)
+@ 
+
+which is unreasonably accurate, IMO.
+
+\bibliography{elliptic}
+\end{document}
diff --git a/inst/doc/residuetheorem.pdf b/inst/doc/residuetheorem.pdf
new file mode 100644
index 0000000..4963621
Binary files /dev/null and b/inst/doc/residuetheorem.pdf differ
diff --git a/man/J.Rd b/man/J.Rd
new file mode 100644
index 0000000..62df68e
--- /dev/null
+++ b/man/J.Rd
@@ -0,0 +1,62 @@
+\name{J}
+\alias{J}
+\alias{lambda}
+\concept{Klein's modular function}
+\concept{Klein's modular function J}
+\concept{Klein's invariant function}
+\concept{Dedekind's valenz function}
+\concept{Dedekind's valenz function J}
+\concept{lambda function}
+\concept{Dedekind}
+
+\title{Various modular functions}
+\description{
+  Modular functions including Klein's modular function J (aka Dedekind's
+  Valenz function J, aka the Klein invariant function, aka Klein's
+  absolute invariant), the lambda function, and Delta.
+}
+\usage{
+J(tau, use.theta = TRUE, ...)
+lambda(tau, ...)
+}
+\arguments{
+  \item{tau}{\eqn{\tau}{tau}; it is assumed that \code{Im(tau)>0}}
+  \item{use.theta}{Boolean, with default \code{TRUE} meaning to use the
+    theta function expansion, and \code{FALSE} meaning to evaluate
+    \code{g2} and \code{g3} directly}
+  \item{\dots}{Extra arguments sent to either \code{theta1()} et seq, or
+    \code{g2.fun()} and \code{g3.fun()} as appropriate}
+}
+\references{
+  K. Chandrasekharan 1985. \emph{Elliptic functions},  Springer-Verlag.
+}
+\author{Robin K. S. Hankin}
+\examples{
+ J(2.3+0.23i,use.theta=TRUE)
+ J(2.3+0.23i,use.theta=FALSE)
+
+ #Verify that J(z)=J(-1/z):
+ z <- seq(from=1+0.7i,to=-2+1i,len=20)
+ plot(abs((J(z)-J(-1/z))/J(z)))
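+
+ # Illustrative check (not from the reference): the square lattice
+ # (tau=1i) has g3=0, so Klein's absolute invariant should be 1 there:
+ J(1i)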
+
+ # Verify that lambda(z) = lambda(Mz) where M is a modular matrix with b,c
+ # even and a,d odd:
+
+ M <- matrix(c(5,4,16,13),2,2)
+ z <- seq(from=1+1i,to=3+3i,len=100)
+ plot(lambda(z)-lambda(M \%mob\% z,maxiter=100))
+
+
+#Now a nice little plot; vary n to change the resolution:
+ n <- 50
+ x <- seq(from=-0.1, to=2,len=n)
+ y <- seq(from=0.02,to=2,len=n)
+
+ z <- outer(x,1i*y,"+")
+ f <- lambda(z,maxiter=40)
+ g <- J(z)
+ view(x,y,f,scheme=04,real.contour=FALSE,main="try higher resolution")
+ view(x,y,g,scheme=10,real.contour=FALSE,main="try higher resolution")
+
+}
+\keyword{math}
diff --git a/man/K.fun.Rd b/man/K.fun.Rd
new file mode 100644
index 0000000..21b167a
--- /dev/null
+++ b/man/K.fun.Rd
@@ -0,0 +1,33 @@
+\name{K.fun}
+\alias{K.fun}
+\alias{e16.1.1}
+\title{quarter period K}
+\description{
+  Calculates the quarter period \eqn{K} in terms of either the
+  parameter \eqn{m} (\code{K.fun()}) or the modulus \eqn{k}
+  (\code{K.fun.k()}).
+}
+\usage{
+K.fun(m, strict=TRUE, maxiter=7)
+}
+\arguments{
+  \item{m}{Real or complex parameter}
+  \item{strict}{Boolean, with default \code{TRUE} meaning to return an
+    error if the sequence has not converged exactly, and \code{FALSE}
+    meaning to return the partial sum, and a warning}
+  \item{maxiter}{Maximum number of iterations}
+}
+\references{
+   R. Coquereaux, A. Grossman, and B. E. Lautrup. \emph{Iterative
+method for calculation of the Weierstrass elliptic function}. IMA
+Journal of Numerical Analysis, vol 10, pp119-128, 1990
+}
+\author{Robin K. S. Hankin}
+\examples{
+K.fun(0.09)  # AMS-55 gives 1.60804862 in example 7 on page 581
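+
+# Illustrative check (assumes sn() from this package): sn() has quarter
+# period K, so the following should be small:
+abs(sn(K.fun(0.3), m=0.3) - 1)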
+
+# next example not run because: (i) it needs gsl; (ii) it gives a warning.
+\dontrun{
+K.fun(0.4,strict=F, maxiter=4) - ellint_Kcomp(sqrt(0.4))
+}
+}
+\keyword{math}
diff --git a/man/P.laurent.Rd b/man/P.laurent.Rd
new file mode 100644
index 0000000..d45ef24
--- /dev/null
+++ b/man/P.laurent.Rd
@@ -0,0 +1,38 @@
+\name{P.laurent}
+\alias{P.laurent}
+\alias{Pdash.laurent}
+\alias{sigma.laurent}
+\alias{sigmadash.laurent}
+\alias{zeta.laurent}
+\alias{e18.5.1}
+\alias{e18f.5.3}
+\alias{e18.5.4}
+\alias{e18.5.5}
+\alias{e18.5.6}
+\title{Laurent series for elliptic and related functions}
+\description{
+Laurent series for various functions
+}
+\usage{
+        P.laurent(z, g=NULL, tol=0, nmax=80)
+    Pdash.laurent(z, g=NULL, nmax=80)
+    sigma.laurent(z, g=NULL, nmax=8, give.error=FALSE)
+sigmadash.laurent(z, g=NULL, nmax=8, give.error=FALSE)
+     zeta.laurent(z, g=NULL, nmax=80)
+}
+\arguments{
+  \item{z}{Primary argument (complex)}
+  \item{g}{Vector of length two with \code{g=c(g2,g3)}}
+  \item{tol}{Tolerance}
+  \item{give.error}{In \code{sigma.laurent()}, Boolean with default
+    \code{FALSE} meaning to return the computed value and \code{TRUE}
+    to return the error (as estimated by the sum of the absolute values
+    of the terms along the minor long diagonal of the matrix)}.
+  \item{nmax}{Number of terms used (or, for \code{sigma()}, the size of
+    matrix used)}
+}
+\author{Robin K. S. Hankin}
+\examples{
+sigma.laurent(z=1+1i,g=c(0,4))
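+
+# for small |z| the truncated series should agree closely with sigma()
+# itself (illustrative check; arbitrary z):
+abs(sigma.laurent(z=0.3-0.1i,g=c(0,4)) - sigma(0.3-0.1i,g=c(0,4)))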
+  }
+\keyword{math}
diff --git a/man/WeierstrassP.Rd b/man/WeierstrassP.Rd
new file mode 100644
index 0000000..a67f451
--- /dev/null
+++ b/man/WeierstrassP.Rd
@@ -0,0 +1,118 @@
+\name{WeierstrassP}
+\alias{WeierstrassP}
+\alias{P}
+\alias{Pdash}
+\alias{sigma}
+\alias{zeta}
+\alias{e18.10.1}
+\alias{e18.10.2}
+\alias{e18.10.3}
+\alias{e18.10.4}
+\alias{e18.10.5}
+\alias{e18.10.6}
+\alias{e18.10.7}
+\concept{Weierstrass}
+\concept{Weierstrass P function}
+\concept{Weierstrass elliptic function}
+\concept{Weierstrass zeta function}
+\concept{Weierstrass sigma function}
+\concept{Elliptic functions}
+\title{Weierstrass P and related functions}
+\description{
+Weierstrass elliptic function and its derivative, Weierstrass sigma
+function, and the Weierstrass zeta function
+}
+\usage{
+P(z, g=NULL, Omega=NULL, params=NULL, use.fpp=TRUE, give.all.3=FALSE, ...)
+Pdash(z, g=NULL, Omega=NULL, params=NULL, use.fpp=TRUE, ...)
+sigma(z, g=NULL, Omega=NULL, params=NULL, use.theta=TRUE, ...)
+zeta(z, g=NULL, Omega=NULL, params=NULL, use.fpp=TRUE, ...)
+}
+\arguments{
+  \item{z}{Primary complex argument}
+  \item{g}{Invariants \code{g=c(g2,g3)}.  Supply exactly one of
+    (\code{g}, \code{Omega}, \code{params})}
+  \item{Omega}{Half periods}
+  \item{params}{Object with class \dQuote{\code{parameters}} (typically
+    provided by  \code{parameters()})}
+  \item{use.fpp}{Boolean, with default \code{TRUE} meaning to calculate
+    \eqn{\wp(z^C)}{P(z^C)} where \eqn{z^C} is congruent to \eqn{z}
+    in the period lattice.  The default means that accuracy is greater
+    for large \eqn{z} but has the deficiency that slight
+    discontinuities may appear near parallelogram boundaries}
+  \item{give.all.3}{Boolean, with default \code{FALSE} meaning to return
+    \eqn{\wp(z)}{P(z)} and \code{TRUE} meaning to return the other forms given
+    in equation 18.10.5, p650.  Use \code{TRUE} to check for accuracy}
+  \item{use.theta}{Boolean, with default \code{TRUE} meaning to use
+    theta function forms, and \code{FALSE} meaning to use a Laurent
+    expansion.  Usually, the theta function form is faster, but not
+    always}
+  \item{...}{Extra parameters passed to \code{theta1()} and \code{theta1dash()}}
+}
+\references{
+  R. K. S. Hankin.  \emph{Introducing Elliptic, an R package for
+    Elliptic and Modular Functions}.  Journal of Statistical Software,
+  Volume 15, Issue 7.  February 2006.
+}  
+\author{Robin K. S. Hankin}
+\note{
+  In this package, function \code{sigma()} is the Weierstrass sigma
+  function.  For the number theoretic divisor function also known as
+  \dQuote{sigma}, see \code{divisor()}.
+  }
+\examples{
+## Example 8, p666, RHS:
+P(z=0.07 + 0.1i,g=c(10,2))
+
+## Example 8, p666, RHS:
+P(z=0.1 + 0.03i,g=c(-10,2))
+## Right answer!
+
+## Compare the Laurent series, which also gives the Right Answer (tm):
+ P.laurent(z=0.1 + 0.03i,g=c(-10,2))
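+
+## Illustrative check (arbitrary z and g) of the defining differential
+## equation Pdash^2 = 4P^3 - g2*P - g3; the result should be small:
+z <- 0.7+0.2i
+g <- c(2,3)
+abs(Pdash(z,g=g)^2 - (4*P(z,g=g)^3 - g[1]*P(z,g=g) - g[2]))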
+
+
+## Now a nice little plot of the zeta function:
+x <- seq(from=-4,to=4,len=100)
+z <- outer(x,1i*x,"+")
+view(x,x,limit(zeta(z,c(1+1i,2-3i))),nlevels=6,scheme=1)
+
+
+#now figure 18.5, top of p643:
+p <- parameters(Omega=c(1+0.1i,1+1i))
+n <- 40
+
+f <- function(r,i1,i2=1)seq(from=r+1i*i1, to=r+1i*i2,len=n)
+g <- function(i,r1,r2=1)seq(from=1i*i+r1,to=1i*i+2,len=n)
+
+solid.lines <-
+  c(
+    f(0.1,0.5),NA,
+    f(0.2,0.4),NA,
+    f(0.3,0.3),NA,
+    f(0.4,0.2),NA,
+    f(0.5,0.0),NA,
+    f(0.6,0.0),NA,
+    f(0.7,0.0),NA,
+    f(0.8,0.0),NA,
+    f(0.9,0.0),NA,
+    f(1.0,0.0)
+    )
+dotted.lines <-
+  c(
+    g(0.1,0.5),NA,
+    g(0.2,0.4),NA,
+    g(0.3,0.3),NA,
+    g(0.4,0.2),NA,
+    g(0.5,0.0),NA,
+    g(0.6,0.0),NA,
+    g(0.7,0.0),NA,
+    g(0.8,0.0),NA,
+    g(0.9,0.0),NA,
+    g(1.0,0.0),NA
+    )
+
+plot(P(z=solid.lines,params=p),xlim=c(-4,4),ylim=c(-6,0),type="l",asp=1)
+lines(P(z=dotted.lines,params=p),xlim=c(-4,4),ylim=c(-6,0),type="l",lty=2)
+}
+\keyword{math}
diff --git a/man/amn.Rd b/man/amn.Rd
new file mode 100644
index 0000000..c373feb
--- /dev/null
+++ b/man/amn.Rd
@@ -0,0 +1,26 @@
+\name{amn}
+\alias{amn}
+\alias{18.5.7}
+\alias{18.5.8}
+\title{matrix a on page 637}
+\description{
+Matrix of coefficients of the Taylor series for
+\eqn{\sigma(z)}{sigma(z)} as described on page 636 and tabulated on page
+637. 
+}
+\usage{
+amn(u)
+}
+\arguments{
+  \item{u}{Integer specifying size of output matrix}
+}
+\details{
+  Reproduces the coefficients \eqn{a_{mn}}{a_mn} on page 637 according to
+  recurrence formulae 18.5.7 and 18.5.8, p636.  Used in equation
+  18.5.6. 
+  }  
+\author{Robin K. S. Hankin}
+\examples{
+amn(12)   #page 637
+}
+\keyword{math}
diff --git a/man/as.primitive.Rd b/man/as.primitive.Rd
new file mode 100644
index 0000000..ddc30a5
--- /dev/null
+++ b/man/as.primitive.Rd
@@ -0,0 +1,52 @@
+\name{as.primitive}
+\alias{as.primitive}
+\alias{is.primitive}
+\title{Converts basic periods to a primitive pair}
+\description{
+Given a pair of basic periods, returns a primitive pair and (optionally)
+the unimodular transformation used.
+}
+\usage{
+as.primitive(p, n = 3, tol = 1e-05, give.answers = FALSE)
+is.primitive(p, n = 3, tol = 1e-05)
+}
+\arguments{
+  \item{p}{Two element vector containing the two basic periods}
+  \item{n}{Maximum magnitude of matrix entries considered}
+  \item{tol}{Numerical tolerance used to determine reality of period ratios}
+  \item{give.answers}{Boolean, with \code{TRUE} meaning to return extra
+    information (unimodular matrix and the magnitudes of the primitive
+    periods) and default \code{FALSE} meaning to return just the
+    primitive periods}
+}
+\details{
+  Primitive periods are not unique.  This function follows
+  Chandrasekharan and others (but not, of course, Abramowitz and Stegun)
+  in demanding that the real part of \code{p1}, and the
+  imaginary part of \code{p2}, are nonnegative.
+}
+\value{
+  If \code{give.answers} is \code{TRUE}, return a list with components
+  \item{M}{The unimodular matrix used}
+  \item{p}{The pair of primitive periods}
+  \item{mags}{The magnitudes of the primitive periods}
+}
+\references{
+  K. Chandrasekharan 1985. \emph{Elliptic functions},  Springer-Verlag
+  }
+\author{Robin K. S. Hankin}
+\note{Here, \dQuote{unimodular} includes the case of determinant minus
+  one.
+  }
+\examples{
+as.primitive(c(3+5i,2+3i))
+as.primitive(c(3+5i,2+3i),n=5)
+
+##Rounding error:
+is.primitive(c(1,1i))
+
+## Try
+ is.primitive(c(1,1.001i))
+
+}
+\keyword{array}
diff --git a/man/ck.Rd b/man/ck.Rd
new file mode 100644
index 0000000..c3177ae
--- /dev/null
+++ b/man/ck.Rd
@@ -0,0 +1,38 @@
+\name{ck}
+\alias{ck}
+\alias{e18.5.2}
+\alias{e18.5.3}
+\alias{e18.5.16}
+\title{Coefficients of Laurent expansion of Weierstrass P function}
+\description{
+  Calculates the coefficients of the Laurent expansion of the
+  Weierstrass \eqn{\wp}{P} function in terms of the invariants
+}
+\usage{
+ck(g, n=20)
+}
+\arguments{
+  \item{g}{The invariants: a vector of length two with \code{g=c(g2,g3)}}
+  \item{n}{length of series}
+}
+\details{
+  Calculates the series \eqn{c_k} as per equation 18.5.3, p635.
+  }
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{P.laurent}}}
+\examples{
+ #Verify 18.5.16, p636:
+ x <- ck(g=c(0.1+1.1i,4-0.63i))
+14*x[2]*x[3]*(389*x[2]^3+369*x[3]^2)/3187041-x[11] #should be zero
+
+
+# Now try a random example by comparing the default (theta function) method
+# for P(z) with the Laurent expansion:
+
+z <- 0.5-0.3i
+g <- c(1.1-0.2i, 1+0.4i)
+series <- ck(15,g=g)
+1/z^2+sum(series*(z^2)^(0:14)) - P(z,g=g) #should be zero
+}
+
+\keyword{math}
diff --git a/man/congruence.Rd b/man/congruence.Rd
new file mode 100644
index 0000000..057b376
--- /dev/null
+++ b/man/congruence.Rd
@@ -0,0 +1,64 @@
+\name{congruence}
+\alias{congruence}
+\title{Solves mx+ny=1 for x and y}
+\description{
+  Solves the Diophantine equation \eqn{mx+ny=1}{mx+ny=1} for \eqn{x}{x}
+  and \eqn{y}{y}.  The function is named for equation 57 in Hardy and Wright.
+}
+\usage{
+congruence(a, l = 1)
+}
+\arguments{
+  \item{a}{Two element vector with \code{a=c(m,n)}}
+  \item{l}{Right hand side with default 1}
+}
+\value{
+  In the usual case of \eqn{(m,n)=1}, returns a square matrix
+whose rows are \code{a} and \code{c(x,y)}.  This matrix is a unimodular
+transformation that takes a pair of basic periods to another pair of
+basic periods.
+
+If \eqn{(m,n)\neq 1}{(m,n) != 1} then more than one solution is
+available (for example \code{congruence(c(4,6),2)}).  In this case, extra rows
+are added and the matrix is no longer square.
+
+}
+\references{
+    G. H. Hardy and  E. M. Wright 1985.  \emph{An introduction to the
+      theory of numbers}, Oxford University Press (fifth edition)
+}
+\author{Robin K. S. Hankin}
+\note{
+  This function does not generate \emph{all} unimodular matrices with a
+  given first row (here, it will be assumed that the function returns a
+  square matrix).
+
+    For a start, this function only returns matrices all of whose elements
+    are positive, and if \code{a} is unimodular, then after \code{diag(a)
+    <- -diag(a)}, both \code{a} and \code{-a} are unimodular (so if
+    \code{a} was originally generated by \code{congruence()}, neither of the
+    derived matrices could be).
+  
+    Now if the first row is \code{c(1,23)}, for example, then the second
+    row need only be of the form \code{c(n,1)} where \code{n} is any
+    integer.  There are thus an infinite number of unimodular matrices
+    whose first row is \code{c(1,23)}.  While this is (somewhat)
+    pathological, consider matrices with a first row of, say,
+    \code{c(2,5)}.  Then the second row could be \code{c(1,3)}, or
+    \code{c(3,8)} or \code{c(5,13)}.  Function \code{congruence()} will
+    return only the first of these.
+
+    To systematically generate all unimodular matrices, use
+    \code{unimodular()}, which uses Farey sequences.    
+  }
+
+\seealso{\code{\link{unimodular}}}
+\examples{
+M <- congruence(c(4,9))
+det(M)
+
+o <- c(1,1i)
+g2.fun(o) - g2.fun(o,maxiter=840)  #should be zero
+
+}
+\keyword{math}
diff --git a/man/coqueraux.Rd b/man/coqueraux.Rd
new file mode 100644
index 0000000..cf2ef65
--- /dev/null
+++ b/man/coqueraux.Rd
@@ -0,0 +1,39 @@
+\name{coqueraux}
+\alias{coqueraux}
+\title{Fast, conceptually simple, iterative scheme for Weierstrass P
+  functions}
+\description{
+  Fast, conceptually simple, iterative scheme for Weierstrass
+  \eqn{\wp}{P} functions, following the ideas of Robert Coqueraux
+}
+\usage{
+coqueraux(z, g, N = 5, use.fpp = FALSE, give = FALSE)
+}
+\arguments{
+  \item{z}{Primary complex argument}
+  \item{g}{Invariants; if an object of type \code{parameters} is supplied,
+    the invariants will be extracted appropriately}
+  \item{N}{Number of iterations to use}
+  \item{use.fpp}{Boolean, with default \code{FALSE} meaning to \emph{not}
+    reduce \code{z} to the fpp.  Setting to \code{TRUE} reduces
+    \code{z} to the fpp via\code{parameters()}: this is more
+    accurate (see example) but slower}
+  \item{give}{Boolean, with \code{TRUE} meaning to return an estimate of
+    the error, and \code{FALSE} meaning to return just the value}
+}
+\references{
+  R. Coquereaux, 1990.  \emph{Iterative method for calculation of the
+    Weierstrass elliptic function}, IMA Journal of Numerical Analysis,
+  volume 10, pp119-128
+}
+\author{Robin K. S. Hankin}
+\examples{
+ z <- seq(from=1+1i,to=30-10i,len=55)
+ p <- P(z,c(0,1))
+ c.true <- coqueraux(z,c(0,1),use.fpp=TRUE)
+ c.false <- coqueraux(z,c(0,1),use.fpp=FALSE)
+ plot(1:55,abs(p-c.false))
+ points(1:55,abs(p-c.true),pch=16)
+ 
+}
+\keyword{math}
diff --git a/man/divisor.Rd b/man/divisor.Rd
new file mode 100644
index 0000000..49d75fc
--- /dev/null
+++ b/man/divisor.Rd
@@ -0,0 +1,72 @@
+\name{divisor}
+\alias{divisor}
+\alias{primes}
+\alias{factorize}
+\alias{mobius}
+\alias{totient}
+\alias{liouville}
+\concept{Multiplicative functions}
+\title{Number theoretic functions}
+\description{
+Various useful number theoretic functions
+}
+\usage{
+divisor(n,k=1)
+primes(n)
+factorize(n)
+mobius(n)
+totient(n)
+liouville(n)
+}
+\arguments{
+  \item{n,k}{Integers}
+}
+\details{
+  Functions \code{primes()} and \code{factorize()} are cut-and-pasted from
+  Bill Venables's conf.design package, version 0.0-3.  Function
+  \code{primes(n)} returns a vector of all primes not exceeding
+  \code{n}; function \code{factorize(n)} returns an integer vector of
+  nondecreasing primes whose product is \code{n}.
+
+  The others are multiplicative functions, defined in Hardy and
+  Wright:
+
+  Function \code{divisor()}, also written
+  \eqn{\sigma_k(n)}{sigma_k(n)}, is the divisor function defined on
+  p239.  This gives the sum of the \eqn{k^{\rm th}}{k-th} powers of all
+  the divisors of \code{n}.  Setting \eqn{k=0} corresponds to
+  \eqn{d(n)}, which gives the number of divisors of \code{n}. 
+
+  Function \code{mobius()} is the Moebius function (p234), giving zero
+  if \code{n} has a repeated prime factor, and \eqn{(-1)^q} where
+  \eqn{n=p_1p_2\ldots p_q}{n=p_1*p_2*...p_q} otherwise.
+
+  Function \code{totient()} is Euler's totient function (p52), giving
+  the number of integers smaller than \code{n} and relatively prime to
+  it.
+
+  Function \code{liouville()} gives the Liouville function
+  \eqn{\lambda(n)=(-1)^{\Omega(n)}}{lambda(n) = (-1)^Omega(n)}, where
+  \eqn{\Omega(n)}{Omega(n)} is the number of prime factors of \eqn{n},
+  counted according to multiplicity.
+}
+\references{G. H. Hardy and E. M. Wright, 1985.  \emph{An
+introduction to the theory of numbers} (fifth edition).
+  Oxford University Press.}
+\note{
+  The divisor function crops up in \code{g2.fun()} and \code{g3.fun()}.
+  Note that this function is not called \code{sigma()} to
+  avoid conflicts with Weierstrass's \eqn{\sigma}{sigma} function (which
+  ought to take priority in this context).
+}
+\author{Robin K. S. Hankin and Bill Venables (\code{primes()} and
+\code{factorize()})}
+\examples{
+mobius(1)
+mobius(2)
+divisor(140)
+divisor(140,3)
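+
+# illustrative check: these functions are multiplicative, so for coprime
+# arguments the value at the product equals the product of the values:
+divisor(12*35) == divisor(12)*divisor(35)
+totient(15)    == totient(3)*totient(5)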
+
+
+plot(divisor(1:100,k=1),type="s",xlab="n",ylab="divisor(n,1)")
+
+plot(cumsum(liouville(1:1000)),type="l",main="does the function ever exceed zero?")
+}
+\keyword{math}
diff --git a/man/e16.28.1.Rd b/man/e16.28.1.Rd
new file mode 100644
index 0000000..8d69f7e
--- /dev/null
+++ b/man/e16.28.1.Rd
@@ -0,0 +1,38 @@
+\name{e16.28.1}
+\alias{e16.28.1}
+\alias{e16.28.2}
+\alias{e16.28.3}
+\alias{e16.28.4}
+\alias{e16.28.5}
+\title{Numerical verification of equations 16.28.1 to 16.28.5}
+\description{
+Verifies formulae 16.28.1 to 16.28.5 on p576
+}
+\usage{
+e16.28.1(z, m, ...)
+e16.28.2(z, m, ...)
+e16.28.3(z, m, ...)
+e16.28.4(z, m, ...)
+e16.28.5(m, ...)
+}
+
+\arguments{
+  \item{z}{Complex number}
+  \item{m}{Parameter \eqn{m}}
+  \item{...}{Extra arguments passed to \code{theta[1-4]()}}
+}
+\details{
+Returns the left hand side minus the right hand side of each formula.
+Each formula documented here is identically zero; nonzero values
+are returned due to numerical errors and should be small.
+}
+\references{
+    M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+      Functions.}  New York, Dover.
+}
+\author{Robin K. S. Hankin}
+\examples{
+ plot(e16.28.4(z=1:6000,m=0.234))
+ plot(abs(e16.28.4(z=1:6000,m=0.234+0.1i)))
+}
+\keyword{array}
diff --git a/man/e18.10.9.Rd b/man/e18.10.9.Rd
new file mode 100644
index 0000000..43a907f
--- /dev/null
+++ b/man/e18.10.9.Rd
@@ -0,0 +1,42 @@
+\name{e18.10.9}
+\alias{e18.10.9}
+\alias{e18.10.9a}
+\alias{e18.10.9b}
+\alias{e18.10.10}
+\alias{e18.10.10a}
+\alias{e18.10.10b}
+\alias{e18.10.11}
+\alias{e18.10.11a}
+\alias{e18.10.11b}
+\alias{e18.10.12}
+\alias{e18.10.12a}
+\alias{e18.10.12b}
+\title{Numerical checks of equations 18.10.9-11, page 650}
+\description{
+Numerical checks of equations 18.10.9-11, page 650.  Function returns
+LHS minus RHS.
+}
+\usage{
+e18.10.9(parameters)
+}
+\arguments{
+  \item{parameters}{An object of class \dQuote{parameters}}
+}
+\value{
+Returns a complex vector of length three: \eqn{e_1},
+\eqn{e_2}, \eqn{e_3}
+}
+\references{
+  M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+    Functions.}  New York, Dover.
+}
+\author{Robin K. S. Hankin}
+\note{
+  A good check for the three \eqn{e}'s being in the right order.
+  
+}
+\examples{
+e18.10.9(parameters(g=c(0,1)))
+e18.10.9(parameters(g=c(1,0)))
+}
+\keyword{math}
diff --git a/man/e1e2e3.Rd b/man/e1e2e3.Rd
new file mode 100644
index 0000000..c71e784
--- /dev/null
+++ b/man/e1e2e3.Rd
@@ -0,0 +1,99 @@
+\name{e1e2e3}
+\alias{e1e2e3}
+\alias{eee.cardano}
+\alias{e18.3.1}
+\alias{e18.3.7}
+\alias{e18.3.8}
+\title{Calculate e1, e2, e3 from the invariants}
+\description{
+  Calculates e1, e2, e3 from the invariants using
+  either \code{polyroot} or Cardano's method.
+}
+\usage{
+e1e2e3(g, use.laurent=TRUE, AnS=is.double(g), Omega=NULL, tol=1e-6)
+eee.cardano(g)
+}
+\arguments{
+  \item{g}{Two-element vector with \code{g=c(g2,g3)}}
+  \item{use.laurent}{Boolean, with default \code{TRUE} meaning
+    to use \code{P.laurent()} to determine the correct ordering for the
+    \eqn{e}: \eqn{\wp(\omega_1)}{P(omega1)}, \eqn{\wp(\omega_2)}{P(omega2)},
+    \eqn{\wp(\omega_3)}{P(omega3)}.  Setting to  \code{FALSE} means to
+    return the solutions of the cubic equation directly: this is much
+    faster, but is not guaranteed to find the \eqn{e_i}{e} in the
+    right order (the roots are found according to the vagaries of
+    \code{polyroot()})}
+  \item{AnS}{Boolean, with default \code{TRUE} meaning to define
+    \eqn{\omega_3}{omega3} as per ams-55, and \code{FALSE} meaning to
+    follow Whittaker and Watson, and define
+    \eqn{\omega_1}{omega1} and \eqn{\omega_2}{omega2} as the
+    primitive half periods, and
+    \eqn{\omega_3=-\omega_1-\omega_2}{omega3=-omega1-omega2}.  This is
+    also consistent with Chandrasekharan except the factor of 2.
+
+    Also note that setting \code{AnS} to \code{TRUE} forces the
+    \eqn{e} to be real}
+  \item{Omega}{A pair of primitive half periods, if known.  If supplied, the
+    function uses them to calculate approximate values for the three
+    \eqn{e}s (but supplies values calculated by \code{polyroot()},
+    which are much more accurate).  The function needs the approximate
+    values to determine in which order the \eqn{e}s should be, as
+    \code{polyroot()} returns roots in whichever order the polynomial
+    solver gives them in}
+  \item{tol}{Real, relative tolerance criterion for terminating Laurent
+    summation} 
+}
+\value{
+  Returns a three-element vector.
+  }
+\note{
+  Function \code{parameters()} calls \code{e1e2e3()}, so \strong{do not
+    use \code{parameters()} to determine argument \code{g}, because
+    doing so will result in a recursive loop.}
+
+    Just to be specific: \code{e1e2e3(g=parameters(...))} will fail.  It
+    would also be pointless, because \code{parameters()} returns
+    (inter alia) \eqn{e_1, e_2, e_3}{e1, e2, e3} anyway.
+
+    There is considerable confusion about the order of \eqn{e_1}{e1},
+    \eqn{e_2}{e2} and \eqn{e_3}{e3}, essentially due to Abramowitz and
+    Stegun's definition of the half periods being inconsistent with
+    those of Chandrasekharan and Mathematica.  It is not possible to
+    reconcile A and S's notation for theta functions with
+    Chandrasekharan's definition of a primitive pair.  Thus,
+    the convention adopted here is the rather strange-seeming choice of
+    \eqn{e_1=\wp(\omega_1/2)}{e1=P(omega_1/2)},
+    \eqn{e_2=\wp(\omega_3/2)}{e2=P(omega_3/2)},
+    \eqn{e_3=\wp(\omega_2/2)}{e3=P(omega_2/2)}.  This has the advantage
+    of making equation 18.10.5 (p650, ams55), and equation
+    09.13.27.0011.01, return three identical values.
+
+    The other scheme to rescue 18.10.5 would be to define
+    \eqn{(\omega_1,\omega_3)}{(omega1,omega3)} as a primitive pair, and
+    to require
+    \eqn{\omega_2=-\omega_1-\omega_3}{omega2=-omega1-omega3}.  This is
+    the method adopted by Mathematica; it is no more inconsistent with
+    ams55 than the solution used in package \pkg{elliptic}.  However,
+    this scheme suffers from the
+    disadvantage that the independent elements of \code{Omega} would
+    have to be supplied as \code{c(omega1,NA,omega3)}, and this is
+    inimical to the precepts of R.
+
+    One can realize the above in practice by
+    considering what this package calls
+    \dQuote{\eqn{\omega_2}{omega2}} to be \emph{really}
+    \eqn{\omega_3}{omega3}, and what this package calls
+    \dQuote{\eqn{\omega_1+\omega_2}{omega1+omega2}} to be
+    \emph{really} \eqn{\omega_2}{omega2}.  Making function
+    \code{half.periods()} return a three element vector with names
+    \code{omega1}, \code{omega3}, \code{omega2} might work on some
+    levels, and indeed might be the correct solution for a user
+    somewhere; but it would be confusing.  This confusion would
+    dog my weary steps for ever more.    
+  }
+\references{Mathematica}
+\author{Robin K. S. Hankin}
+\examples{
+ sum(e1e2e3(g=c(1,2)))
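+ # A hedged illustration of the ordering discussion in the note (not in
+ # the original examples): Cardano's method gives the same three roots,
+ # though possibly in a different order:
+ e1e2e3(g=c(1,2))
+ eee.cardano(g=c(1,2))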
+}
+\keyword{math}
diff --git a/man/elliptic-package.Rd b/man/elliptic-package.Rd
new file mode 100644
index 0000000..c94141d
--- /dev/null
+++ b/man/elliptic-package.Rd
@@ -0,0 +1,124 @@
+\name{elliptic-package}
+\alias{elliptic-package}
+\alias{elliptic}
+\docType{package}
+\title{
+Elliptic and modular functions
+}
+\description{
+ A suite of elliptic and related functions including Weierstrass and
+ Jacobi forms.  Also includes various tools for manipulating and
+ visualizing complex functions.
+}
+\details{
+The primary function in package \pkg{elliptic} is \code{P()}: this
+calculates the Weierstrass \eqn{\wp}{P} function, and may take named
+arguments that specify either the invariants \code{g} or half
+periods \code{Omega}.  The derivative is given by function \code{Pdash}
+and the Weierstrass sigma and zeta functions are given by functions
+\code{sigma()} and \code{zeta()} respectively; these are documented in
+\code{?P}.  Jacobi forms are documented under \code{?sn} and modular
+forms under \code{?J}.
+
+Notation follows Abramowitz and Stegun (1965) where possible, although
+only real invariants are considered there; \code{?e1e2e3} and
+\code{?parameters} give a more detailed discussion.  Various equations
+from AMS-55 are implemented (for fun); the functions are named after
+their equation numbers in AMS-55; all references are to this work unless
+otherwise indicated.
+
+The package uses Jacobi's theta functions (\code{?theta} and
+\code{?theta.neville}) where possible: they converge very quickly.
+
+Various number-theoretic functions that are required for (eg) converting
+a period pair to primitive form (\code{?as.primitive}) are implemented;
+see \code{?divisor} for a list.
+
+The package also provides some tools for numerical verification of
+complex analysis such as contour integration (\code{?myintegrate}) and
+Newton-Raphson iteration for complex functions
+(\code{?newton_raphson}).
+
+Complex functions may be visualized using \code{view()}; this is
+customizable but has an extensive set of built-in colourmaps.
+}
+\author{
+  Robin K. S. Hankin
+}
+\references{
+  \itemize{
+    \item
+    R. K. S. Hankin.  \emph{Introducing Elliptic, an R package for
+      Elliptic and Modular Functions}.  Journal of Statistical Software,
+    Volume 15, Issue 7.  February 2006.
+    \item
+    M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+      Functions.}  New York, Dover.
+    \item
+    K. Chandrasekharan 1985. \emph{Elliptic functions},  Springer-Verlag.
+    \item
+    E. T. Whittaker and G. N. Watson 1952.  \emph{A Course of Modern
+      Analysis}, Cambridge University Press (fourth edition)
+    \item
+    G. H. Hardy and  E. M. Wright 1985.  \emph{An introduction to the
+      theory of numbers}, Oxford University Press (fifth edition)
+    \item
+    S. D. Panteliou and A. D. Dimarogonas and I. N .Katz 1996.
+    \emph{Direct and inverse interpolation for Jacobian elliptic
+      functions, zeta function of Jacobi and complete elliptic integrals
+      of the second kind}.  Computers and Mathematics with Applications,
+    volume 32, number 8, pp51-57
+    \item
+    E. L. Wachspress 2000.  \emph{Evaluating Elliptic functions and their
+      inverses}.  Computers and Mathematics with Applications, volume 29,
+    pp131-136
+    \item
+    D. G. Vyridis and S. D. Panteliou and I. N. Katz 1999.  \emph{An inverse
+      convergence approach for arguments of Jacobian elliptic functions}.
+    Computers and Mathematics with Applications, volume 37, pp21-26
+    \item
+    S. Paszkowski 1997.  \emph{Fast convergent quasipower series for some
+      elementary and special functions}.   Computers and Mathematics with
+    Applications, volume 33, number 1/2, pp181-191
+    \item
+    B. Thaller 1998. \emph{Visualization of complex functions}, The
+    Mathematica Journal, 7(2):163--180
+    \item
+    J. Kotus and M. Urb\'{a}nski 2003.  \emph{Hausdorff dimension and Hausdorff
+      measures of Julia sets of elliptic functions}.  Bulletin of the London
+    Mathematical Society, volume 35, pp269-275
+  }
+}
+\keyword{ package }
+\examples{
+     ## Example 8, p666, RHS:
+ P(z=0.07 + 0.1i, g=c(10,2)) 
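+
+     ## A hedged companion example (assuming Pdash() accepts the same z
+     ## and g arguments as P(), as ?P suggests): the derivative at the
+     ## same point:
+ Pdash(z=0.07 + 0.1i, g=c(10,2))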
+
+     ## Now a nice little plot of the zeta function:
+ x <- seq(from=-4,to=4,len=100)
+ z <- outer(x,1i*x,"+")
+ par(pty="s")
+ view(x,x,limit(zeta(z,c(1+1i,2-3i))),nlevels=3,scheme=1)
+ view(x,x,P(z*3,params=equianharmonic()),real=FALSE)
+
+     ## Some number theory:
+ mobius(1:10)
+ plot(divisor(1:300,k=1),type="s",xlab="n",ylab="divisor(n,1)")
+
+    ## Primitive periods:
+ as.primitive(c(3+4.01i , 7+10i))
+ as.primitive(c(3+4.01i , 7+10i),n=10)   # Note difference
+
+    ## Now some contour integration:
+ f <- function(z){1/z}
+ u <- function(x){exp(2i*pi*x)}
+ udash <- function(x){2i*pi*exp(2i*pi*x)}
+ integrate.contour(f,u,udash) - 2*pi*1i
+
+
+ x <- seq(from=-10,to=10,len=200)
+ z <- outer(x,1i*x,"+")
+ view(x,x,P(z,params=lemniscatic()),real=FALSE)
+ view(x,x,P(z,params=pseudolemniscatic()),real=FALSE)
+ view(x,x,P(z,params=equianharmonic()),real=FALSE)
+}
diff --git a/man/equianharmonic.Rd b/man/equianharmonic.Rd
new file mode 100644
index 0000000..ff276ee
--- /dev/null
+++ b/man/equianharmonic.Rd
@@ -0,0 +1,51 @@
+\name{equianharmonic}
+\alias{equianharmonic}
+\alias{lemniscatic}
+\alias{pseudolemniscatic}
+\title{Special cases of the Weierstrass elliptic function}
+\description{
+Gives parameters for the equianharmonic case, the lemniscatic case, and
+the pseudolemniscatic case.
+}
+\usage{
+equianharmonic(...)
+lemniscatic(...)
+pseudolemniscatic(...)
+}
+\arguments{
+  \item{\dots}{Ignored}
+}
+\details{
+  These functions return values from section 18.13, p652; 18.14, p658;
+  and 18.15, p662.  They use elementary functions (and the gamma
+  function) only, so ought to be more accurate and faster than calling
+  \code{parameters()} with the corresponding invariants directly (for
+  example, \code{parameters(g=c(1,0))} for the lemniscatic case).
+
+  Note that the values for the half periods correspond to the general
+  case for complex \code{g2} and \code{g3} so are simple linear
+  combinations of those given in AnS.
+    
+  One can use \code{parameters("equianharmonic")} \emph{et seq} instead.
+ 
+}
+\value{
+  Returns a list with the same elements as \code{parameters()}.
+}
+\references{
+    M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+      Functions.}  New York, Dover.
+}  
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{parameters}}}
+\examples{
+P(z=0.1+0.1212i,params=equianharmonic())
+
+
+x <- seq(from=-10,to=10,len=200)
+z <- outer(x,1i*x,"+")
+view(x,x,P(z,params=lemniscatic()),real=FALSE)
+view(x,x,P(z,params=pseudolemniscatic()),real=FALSE)
+view(x,x,P(z,params=equianharmonic()),real=FALSE)
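+
+## A hedged check (not in the original examples), assuming the returned
+## list has the same "g" element as parameters(): the equianharmonic
+## invariants should be g2=0, g3=1:
+equianharmonic()$g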
+
+}
+\keyword{math}
diff --git a/man/eta.Rd b/man/eta.Rd
new file mode 100644
index 0000000..b5606c1
--- /dev/null
+++ b/man/eta.Rd
@@ -0,0 +1,40 @@
+\name{eta}
+\alias{eta}
+\alias{eta.series}
+\title{Dedekind's eta function}
+\description{  Dedekind's \eqn{\eta}{eta} function  }
+\usage{
+eta(z, ...)
+eta.series(z, maxiter=300)
+}
+\arguments{
+  \item{z}{Complex argument}
+  \item{\dots}{In function \code{eta()}, extra arguments sent to
+    \code{theta3()}}
+  \item{maxiter}{In function \code{eta.series()}, maximum number of
+    iterations}
+}
+\details{
+  Function \code{eta()} uses Euler's formula, viz
+  \deqn{\eta(z)=e^{\pi
+      iz/12}\theta_3\left(\frac{1}{2}+\frac{z}{2},3z\right)}{[omitted;
+    see LaTeX version]}
+
+  Function \code{eta.series()} is present for validation (and interest)
+  only; it uses the infinite product formula:
+  \deqn{\eta(z)=
+    e^{\pi iz/12}\prod_{n=1}^\infty\left(1-e^{2\pi inz}\right)}{[omitted;
+    see LaTeX version]}
+}
+\references{
+  K. Chandrasekharan 1985. \emph{Elliptic functions},  Springer-Verlag.
+}
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{farey}}}
+\examples{
+ z <- seq(from=1+1i,to=10+0.06i,len=999)
+ plot(eta(z))
+
+max(abs(eta(z)-eta.series(z)))
+}
+\keyword{math}
diff --git a/man/farey.Rd b/man/farey.Rd
new file mode 100644
index 0000000..a25cc0b
--- /dev/null
+++ b/man/farey.Rd
@@ -0,0 +1,40 @@
+\name{farey}
+\alias{farey}
+\title{Farey sequences}
+\description{
+  Returns the Farey sequence of order \eqn{n}{n}
+}
+\usage{
+farey(n, print=FALSE, give.series = FALSE)
+}
+\arguments{
+  \item{n}{Order of Farey sequence}
+  \item{print}{Boolean, with \code{TRUE} meaning to print out the text
+    version of the Farey sequence in human-readable form.  Default value
+    of \code{FALSE} means not to print anything}
+  \item{give.series}{Boolean, with \code{TRUE} meaning to return the
+    series explicitly, and default \code{FALSE} meaning to return a 3
+    dimensional array as detailed below}
+}
+\details{
+  If \code{give.series} takes its default value of \code{FALSE}, return
+  a matrix \code{a} of dimension \code{c(2,u)} where \code{u} is a
+  (complicated) function of \code{n}.  If \code{v <- a[,i]}, then
+  \code{v[1]/v[2]} is the \eqn{i^{\mathrm th}}{i-th} term of the Farey
+  sequence.  Note that \code{det(a[,n:(n+1)]) == -1}
+
+  If \code{give.series} is \code{TRUE}, then return a matrix \code{a} of
+  size \code{c(4,u-1)}.  If \code{v <- a[,i]}, then \code{v[1]/v[2]} and
+  \code{v[3]/v[4]} are successive pairs of the Farey sequence.  Note
+  that \code{det(matrix(a[,i],2,2)) == -1}
+}
+\references{
+    G. H. Hardy and  E. M. Wright 1985.  \emph{An introduction to the
+      theory of numbers}, Oxford University Press (fifth edition)
+}
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{unimodular}}}
+\examples{
+farey(3)
+}
+\keyword{array}
diff --git a/man/fpp.Rd b/man/fpp.Rd
new file mode 100644
index 0000000..92e3c6f
--- /dev/null
+++ b/man/fpp.Rd
@@ -0,0 +1,53 @@
+\name{fpp}
+\alias{fpp}
+\alias{mn}
+\title{Fundamental period parallelogram}
+\description{
+  Reduce \eqn{z=x+iy} to a congruent value within the
+  fundamental period parallelogram (FPP).  Function \code{mn()} gives
+  (real, possibly noninteger) \eqn{m} and \eqn{n} such that
+  \eqn{z=m\cdot p_1+n\cdot p_2}{z=m*p_1+n*p_2}.
+}
+\usage{
+fpp(z, p, give=FALSE)
+mn(z, p)
+}
+\arguments{
+  \item{z}{Primary complex argument}
+  \item{p}{Vector of length two with first element the first period and
+    second element the second period.  Note that \eqn{p} is the
+    period, so \eqn{p_1=2\omega_1}, where \eqn{\omega_1}{omega1} is the
+    half period}
+  \item{give}{Boolean, with \code{TRUE} meaning to return \eqn{m} and
+    \eqn{n}, and default \code{FALSE} meaning to return just the
+    congruent values}
+}
+\details{
+  Function \code{fpp()} is fully vectorized.
+
+  Use function \code{mn()} to determine the \dQuote{coordinates} of a
+  point.
+
+  Use \code{floor(mn(z,p)) \%*\% p } to give the  complex value of
+  the (unique) point in the same period parallelogram as \code{z} that
+  is congruent to the origin.
+}
+\author{Robin K. S. Hankin}
+\examples{
+p <- c(1.01+1.123i, 1.1+1.43i)
+mn(z=1:10,p) \%*\% p  ## should be close to 1:10
+
+ #Now specify some periods:
+ p2 <- c(1+1i,1-1i)
+
+ #Define a sequence of complex numbers that zooms off to infinity:
+ u <- seq(from=0,by=pi+1i*exp(1),len=2007)
+
+ #and plot the sequence, modulo the periods:
+ plot(fpp(z=u,p=p2))
+
+ #and check that the resulting points are within the period parallelogram:
+polygon(c(-1,0,1,0),c(0,1,0,-1))
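+
+ # A hedged illustration of the details section: floor(mn(z,p)) \%*\% p is
+ # the lattice point in the same parallelogram as z, so subtracting it
+ # from z should land in the parallelogram based at the origin:
+ z0 <- 3.7+2.2i
+ z0 - floor(mn(z0,p=p2)) \%*\% p2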
+ 
+
+}
+\keyword{math}
diff --git a/man/g.fun.Rd b/man/g.fun.Rd
new file mode 100644
index 0000000..92087d1
--- /dev/null
+++ b/man/g.fun.Rd
@@ -0,0 +1,118 @@
+\name{g.fun}
+\alias{g.fun}
+\alias{g2.fun}
+\alias{g3.fun}
+\alias{g2.fun.lambert}
+\alias{g3.fun.lambert}
+\alias{g2.fun.direct}
+\alias{g3.fun.direct}
+\alias{g2.fun.fixed}
+\alias{g3.fun.fixed}
+\alias{g2.fun.divisor}
+\alias{g3.fun.divisor}
+\alias{g2.fun.vectorized}
+\alias{g3.fun.vectorized}
+\alias{e18.1.1}
+\title{Calculates the invariants g2 and g3}
+\description{
+Calculates the invariants g2 and g3 using any of a number of methods
+}
+\usage{
+g.fun(b, ...)
+g2.fun(b, use.first=TRUE, ...)
+g3.fun(b, use.first=TRUE, ...)
+g2.fun.lambert(b, nmax=50, tol=1e-10, strict=TRUE)
+g3.fun.lambert(b, nmax=50, tol=1e-10, strict=TRUE)
+g2.fun.direct(b, nmax=50, tol=1e-10)
+g3.fun.direct(b, nmax=50, tol=1e-10)
+g2.fun.fixed(b, nmax=50, tol=1e-10, give=FALSE)
+g3.fun.fixed(b, nmax=50, tol=1e-10, give=FALSE)
+g2.fun.vectorized(b, nmax=50, tol=1e-10, give=FALSE)
+g3.fun.vectorized(b, nmax=50, tol=1e-10, give=FALSE)
+}
+\arguments{
+\item{b}{Half periods.  NB: the arguments
+  are the \strong{half periods} as per AMS55!
+  In these functions, argument \code{b} is interpreted as per
+  \code{p1.tau()}}
+\item{nmax}{Maximum number of terms to sum.  See details section for
+  more discussion}
+\item{tol}{Numerical tolerance for stopping: summation stops when adding
+  an additional term changes the partial sum by less than \code{tol}}
+\item{strict}{Boolean, with default (where taken) \code{TRUE} meaning to
+  \code{stop()} if convergence is not achieved in \code{nmax} terms.
+  Setting to \code{FALSE} returns the partial sum and a warning.}
+\item{give}{Boolean, with \code{TRUE} meaning to return the partial
+  sums, and default (where taken) \code{FALSE} meaning to return just
+  the final value}
+\item{...}{In functions \code{g.fun()}, \code{g2.fun()} and
+  \code{g3.fun()}, extra arguments passed to \code{theta1()} and friends}
+\item{use.first}{In function \code{g2.fun()} and \code{g3.fun()},
+  Boolean with default \code{TRUE} meaning to use Wolfram's first formula
+  (\strong{remember to cite this}) and \code{FALSE} meaning to use the second}
+}
+\details{
+  Functions \code{g2.fun()} and \code{g3.fun()} use theta functions
+  which converge very quickly.  These functions are the best in most
+  circumstances.  The theta functions include a loop that continues to add
+  terms until the partial sum is unaltered by addition
+  of the next term.  Note that summation continues until \emph{all}
+  elements of the argument are properly summed, so performance is
+  limited by the single worst-case element.
+  
+  The following functions are provided for interest only, although there
+  is a remote possibility that some weird circumstances may exist in which
+  they are faster than the theta function approach.  
+
+  Functions \code{g2.fun.divisor()} and \code{g3.fun.divisor()} use
+  Chandrasekharan's formula on page 83.  This is generally slower than
+  the theta function approach
+
+  Functions \code{g2.fun.lambert()} and \code{g3.fun.lambert()} use a 
+  Lambert series to accelerate Chandrasekharan's formula.  In general,
+  it is a little better than the divisor form.
+  
+  Functions \code{g2.fun.fixed()} and \code{g3.fun.fixed()} also use
+  Lambert series.  These functions are vectorized in the sense that
+  the function body uses only vector operations.  These functions do
+  not take a vector argument.  They are called \dQuote{fixed} because
+  the number of terms used is fixed in advance (unlike \code{g2.fun()}
+  and \code{g3.fun()}).
+  
+  Functions \code{g2.fun.vectorized()} and \code{g3.fun.vectorized()}
+  also use Lambert series.  They are fully vectorized in that they take
+  a vector of periods or period ratios, unlike the previous two
+  functions.  However, this can lead to loss of precision in some
+  cases (specifically when the periods give rise to widely varying
+  values of g2 and g3).
+  
+  Functions \code{g2.fun.direct()} and \code{g3.fun.direct()} use a
+  direct summation.  These functions are absurdly slow.  In general,
+  the Lambert series functions converge much faster; and the
+  \dQuote{default} functions \code{g2.fun()} and \code{g3.fun()},
+  which use theta functions, converge faster still.
+}
+\references{
+  Mathematica website
+}
+\author{Robin K. S. Hankin}
+\examples{
+
+g.fun(half.periods(g=c(8,4+1i)))  ## should be c(8,4+1i)
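+
+## A hedged comparison (not in the original examples): the Lambert
+## series form should agree with the default theta-function form to
+## within numerical error:
+b <- c(1, 1.2+1.1i)
+abs(g2.fun(b) - g2.fun.lambert(b))   ## should be small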
+
+
+## Example 4, p664, LHS:
+omega <- c(10,11i)
+(g2 <- g2.fun(omega))
+(g3 <- g3.fun(omega))
+e1e2e3(Re(c(g2,g3)))
+
+## Example 4, p664, RHS:
+omega2 <- 10
+omega2dash <- 11i
+omega1 <- (omega2-omega2dash)/2   ## From figure 18.1, p630
+(g2 <- g2.fun(c(omega1,omega2)))
+(g3 <- g3.fun(c(omega1,omega2)))
+e1e2e3(Re(c(g2,g3)))
+}
+\keyword{math}
diff --git a/man/half.periods.Rd b/man/half.periods.Rd
new file mode 100644
index 0000000..a2897a3
--- /dev/null
+++ b/man/half.periods.Rd
@@ -0,0 +1,64 @@
+\name{half.periods}
+\alias{half.periods}
+\title{Calculates half periods in terms of e}
+\description{
+Calculates half periods in terms of \eqn{e}
+}
+\usage{
+half.periods(ignore=NULL, e=NULL, g=NULL, primitive)
+}
+\arguments{
+  \item{e}{e}
+  \item{g}{g}
+  \item{ignore}{Formal argument present to ensure that \code{e} or
+    \code{g} is named (ignored)}
+  \item{primitive}{Boolean, with default \code{TRUE} meaning to return
+    primitive periods and \code{FALSE} to return the direct result of
+    Legendre's iterative scheme}
+}
+\details{    
+  Parameter \code{e=c(e1,e2,e3)} are the values of the Weierstrass
+  \eqn{\wp}{P} function at the half periods:
+  \deqn{e_1=\wp(\omega_1)\qquad e_2=\wp(\omega_2)\qquad e_3=
+  \wp(\omega_3)}{e1=P(omega1), e2=P(omega2), e3=P(omega3)} where
+  \deqn{\omega_1+\omega_2+\omega_3=0}{omega1+omega2+omega3=0}.
+  
+  Also, the \eqn{e}s are given by the roots of the cubic
+  equation \eqn{4x^3-g_2x-g_3=0}{4*x^3-g2*x-g3=0}, but the problem is
+  finding which root corresponds to which of the three elements of
+  \eqn{e}.
+}
+\value{
+  Returns a pair of primitive half periods
+}
+\references{
+    M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+      Functions.}  New York, Dover.
+}
+\author{Robin K. S. Hankin}
+\note{Function \code{parameters()} uses function \code{half.periods()}
+  internally, so do not use \code{parameters()} 
+  to determine \code{e}.
+}
+\examples{
+
+half.periods(g=c(8,4))                ## Example 6, p665, LHS
+
+u <- half.periods(g=c(-10,2))
+massage(c(u[1]-u[2] , u[1]+u[2]))     ## Example 6, p665, RHS
+
+half.periods(g=c(10,2))               ## Example 7, p665, LHS
+
+u <- half.periods(g=c(7,6))
+massage(c(u[1],2*u[2]+u[1]))          ## Example 7, p665, RHS
+
+
+half.periods(g=c(1,1i, 1.1+1.4i))
+half.periods(e=c(1,1i, 2, 1.1+1.4i))
+
+
+g.fun(half.periods(g=c(8,4)))         ##  should be c(8,4)
+
+
+}
+\keyword{math}
diff --git a/man/latplot.Rd b/man/latplot.Rd
new file mode 100644
index 0000000..c685511
--- /dev/null
+++ b/man/latplot.Rd
@@ -0,0 +1,32 @@
+\name{latplot}
+\alias{latplot}
+\title{Plots a lattice of periods on the complex plane}
+\description{
+  Given a pair of basic periods, plots a lattice of periods on the complex plane
+}
+\usage{
+latplot(p, n=10, do.lines=TRUE, ...)
+}
+\arguments{
+  \item{p}{Vector of length two with first element the first period and
+    second element the second period.  Note that
+    \eqn{p_1=2\omega_1}{p1=2*omega1}}
+  \item{n}{Size of lattice}
+  \item{do.lines}{Boolean with default \code{TRUE} meaning to show
+    boundaries between adjacent period parallelograms}
+  \item{...}{Extra arguments passed to
+    \code{plot()}.  See examples section for working use}
+}
+\references{
+  K. Chandrasekharan 1985. \emph{Elliptic functions},
+  Springer-Verlag.
+}
+\author{Robin K. S. Hankin}
+\examples{
+p1 <- c(1,1i)
+p2 <- c(2+3i,5+7i)
+latplot(p1)
+latplot(p2,xlim=c(-4,4),ylim=c(-4,4),n=40)
+
+}
+\keyword{math}
diff --git a/man/lattice.Rd b/man/lattice.Rd
new file mode 100644
index 0000000..127eea2
--- /dev/null
+++ b/man/lattice.Rd
@@ -0,0 +1,19 @@
+\name{lattice}
+\alias{lattice}
+\title{Lattice of complex numbers}
+\description{
+Returns a lattice of numbers generated by a  given complex basis.
+}
+\usage{
+lattice(p,n)
+}
+\arguments{
+  \item{p}{Complex vector of length two giving a basis for the lattice}
+  \item{n}{size of lattice}
+}
+\author{Robin K. S. Hankin}
+\examples{
+ lattice(c(1+10i,100+1000i),n=2)
+plot(lattice(c(1+1i,1.1+1.4i),5))
+}
+\keyword{math}
diff --git a/man/limit.Rd b/man/limit.Rd
new file mode 100644
index 0000000..446e782
--- /dev/null
+++ b/man/limit.Rd
@@ -0,0 +1,33 @@
+\name{limit}
+\alias{limit}
+\title{Limit the magnitude of elements of a vector}
+\description{
+  Deals appropriately with objects with a few very large elements
+}
+\usage{
+limit(x, upper=quantile(Re(x),0.99,na.rm=TRUE),
+         lower=quantile(Re(x),0.01,na.rm=TRUE),
+         na = FALSE)
+}
+\arguments{
+  \item{x}{Vector of real or complex values}
+  \item{upper}{Upper limit}
+  \item{lower}{Lower limit}
+  \item{na}{Boolean, with default \code{FALSE} meaning to \dQuote{clip}
+    \code{x} (if real) by setting elements of \code{x} with
+    \code{x>upper} to \code{upper}; if \code{TRUE}, set such elements
+    to \code{NA}.  If \code{x} is complex, this argument is ignored}
+}
+\details{
+  If \code{x} is complex, \code{lower} is ignored and the function
+  returns \code{x}, after executing \code{x[abs(x)>upper] <- NA}.
+}
+\author{Robin K. S. Hankin}
+\examples{
+
+x <- c(rep(1,5),300, -200)
+limit(x,100)
+limit(x,upper=200,lower= -400)
+limit(x,upper=200,lower= -400,na=TRUE)
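+
+# A hedged illustration of the complex behaviour described in the
+# details section: elements of large modulus become NA:
+zc <- c(1+1i, 100+200i, 2-3i)
+limit(zc, upper=10)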
+}
+\keyword{math}
diff --git a/man/massage.Rd b/man/massage.Rd
new file mode 100644
index 0000000..c398878
--- /dev/null
+++ b/man/massage.Rd
@@ -0,0 +1,21 @@
+\name{massage}
+\alias{massage}
+\title{Massages numbers near the real line to be real}
+\description{
+Returns the Real part of numbers near the real line
+}
+\usage{
+massage(z, tol = 1e-10)
+}
+\arguments{
+  \item{z}{vector of complex numbers to be massaged}
+  \item{tol}{Tolerance}
+}
+\author{Robin K. S. Hankin}
+\examples{
+massage(1+1i)
+massage(1+1e-11i)
+
+massage(c(1,1+1e-11i,1+10i))
+}
+\keyword{math}
diff --git a/man/misc.Rd b/man/misc.Rd
new file mode 100644
index 0000000..dbbda11
--- /dev/null
+++ b/man/misc.Rd
@@ -0,0 +1,26 @@
+\name{misc}
+\alias{Im<-}
+\alias{Re<-}
+\title{Manipulate real or imaginary components of an object}
+\description{
+Manipulate real or imaginary components of an object
+}
+\usage{
+Im(x) <- value
+Re(x) <- value
+}
+\arguments{
+  \item{x}{Complex-valued object}
+  \item{value}{Real-valued object}
+}
+\author{Robin K. S. Hankin}
+\examples{
+x <- 1:10
+Im(x) <- 1
+
+x <- 1:5
+Im(x) <- 1/x
+
+
+}
+\keyword{math}
diff --git a/man/mob.Rd b/man/mob.Rd
new file mode 100644
index 0000000..21bc35a
--- /dev/null
+++ b/man/mob.Rd
@@ -0,0 +1,41 @@
+\name{mob}
+\alias{mob}
+\alias{\%mob\%}
+\title{Moebius transformations}
+\description{
+Moebius transformations
+}
+\usage{
+mob(M, x)
+M \%mob\% x
+}
+\arguments{
+  \item{M}{2-by-2 matrix of integers}
+  \item{x}{vector of values to be transformed}
+}
+\value{
+  Returns a value with the same attributes as \code{x}.  Elementwise, if
+
+  \deqn{M=\left(\begin{array}{cc}a&b\\c&d\end{array}\right)}{omitted:
+    see PDF}
+
+  then \code{mob(M,x)} is \eqn{\frac{ax+b}{cx+d}}{(ax+b)/(cx+d)}.
+  
+}
+\references{
+  Wikipedia contributors, "Mobius transformation," Wikipedia, The Free
+  Encyclopedia (accessed February 13, 2011). 
+  }
+\author{Robin K. S. Hankin}
+\note{This function does not check that \code{M} has integer
+  elements, nor that its determinant is unity.
+}
+\seealso{\code{\link{unimodular}}}
+\examples{
+M <- matrix(c(11,6,9,5),2,2)
+x <- seq(from=1+1i,to=10-2i,len=6)
+
+M \%mob\% x
+plot(mob(M,x))
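+
+# A hedged check (not part of the original examples) of the composition
+# property of Moebius transforms: applying M1 then M2 should equal
+# applying the matrix product M2 \%*\% M1:
+M1 <- matrix(c(2,1,1,1),2,2)
+M2 <- matrix(c(3,2,1,1),2,2)
+abs(mob(M2 \%*\% M1, x) - mob(M2, mob(M1, x)))   # should be small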
+}
+\keyword{math}
diff --git a/man/myintegrate.Rd b/man/myintegrate.Rd
new file mode 100644
index 0000000..65739fb
--- /dev/null
+++ b/man/myintegrate.Rd
@@ -0,0 +1,167 @@
+\name{myintegrate}
+\alias{myintegrate}
+\alias{integrate.contour}
+\alias{integrate.segments}
+\alias{residue}
+\concept{Complex integration}
+\concept{Path integration}
+\concept{Contour integration}
+\concept{Cauchy's theorem}
+\concept{Cauchy's integral theorem}
+\concept{Cauchy's formula}
+\concept{Residue theorem}
+\title{Complex integration}
+\description{
+  Integration of complex valued functions along the real axis
+  (\code{myintegrate()}), along arbitrary paths
+  (\code{integrate.contour()}), and following arbitrary straight line
+  segments (\code{integrate.segments()}).  Also, evaluation of a function at a
+  point using the residue theorem (\code{residue()}).
+}
+\usage{
+myintegrate(f, lower,upper, ...)
+integrate.contour(f,u,udash, ...)
+integrate.segments(f,points, close=TRUE, ...)
+residue(f, z0, r, O=z0, ...)
+}
+\arguments{
+  \item{f}{function, possibly complex valued}
+  \item{lower,upper}{Lower and upper limits of integration in \code{myintegrate()}; 
+    real numbers (for complex values, use \code{integrate.contour()} or
+    \code{integrate.segments()})}
+  \item{u}{Function mapping \eqn{[0,1]} to the contour.  For a
+    closed contour, require that \eqn{u(0)=u(1)}}
+  \item{udash}{Derivative of \code{u}}
+  \item{points}{In function \code{integrate.segments()}, a vector of complex
+    numbers.  Integration will be taken over straight segments joining
+    consecutive elements of \code{points}}
+  \item{close}{In function \code{integrate.segments()}, a Boolean
+    variable with default \code{TRUE} meaning to integrate along the segment
+    from \code{points[n]} to \code{points[1]} in addition to the internal
+    segments}
+  \item{r,O,z0}{Arguments to function \code{residue()}, which returns
+    \code{f(z0)} by integrating \eqn{f(z)/(z-z0)} around a circle of
+    radius \code{r} and centre \code{O}}
+  \item{...}{Extra arguments passed to \code{integrate()}}
+}
+\author{Robin K. S. Hankin}
+\examples{
+
+f1 <- function(z){sin(exp(z))}
+f2 <- function(z,p){p/z}
+
+myintegrate(f1,2,3)  # that is, along the real axis
+
+
+integrate.segments(f1,c(1,1i,-1,-1i),close=TRUE)   # should be zero
+
+# (following should be pi*2i; note secondary argument):
+integrate.segments(f2,points=c(1,1i,-1,-1i),close=TRUE,p=1)
+
+
+
+# To integrate round the unit circle, we need the contour and its
+# derivative:
+
+ u <- function(x){exp(pi*2i*x)}
+ udash <- function(x){pi*2i*exp(pi*2i*x)}
+
+# Some elementary functions, for practice:
+
+# (following should be 2i*pi; note secondary argument 'p'):
+integrate.contour(function(z,p){p/z},u,udash,p=1)      
+integrate.contour(function(z){log(z)},u,udash)         # should be -2i*pi
+integrate.contour(function(z){sin(z)+1/z^2},u,udash)   # should be zero
+
+
+
+# residue() is a convenience wrapper integrating f(z)/(z-z0) along a
+# circular contour:
+
+residue(function(z){1/z},2,r=0.1)  # should be 1/2=0.5
+
+
+
+# Now, some elliptic functions:
+g <- c(3,2+4i)
+Zeta <- function(z){zeta(z,g)}
+Sigma <- function(z){sigma(z,g)}
+WeierstrassP <- function(z){P(z,g)}
+
+jj <- integrate.contour(Zeta,u,udash) 
+abs(jj-2*pi*1i)                              # should be zero
+abs(integrate.contour(Sigma,u,udash))        # should be zero
+abs(integrate.contour(WeierstrassP,u,udash)) # should be zero
+
+
+
+
+# Now integrate f(x) = exp(1i*x)/(1+x^2) from -Inf to +Inf along the
+# real axis, using the Residue Theorem.  This tells us that integral of
+# f(z) along any closed path is equal to pi*2i times the sum of the
+# residues inside it.  Take a semicircular path P from -R to +R along
+# the real axis, then following a semicircle in the upper half plane, of
+# radius R to close the loop.  Now consider large R.  Then P encloses a
+# pole at +1i [there is one at -1i also, but this is outside P, so
+# irrelevent here] at which the residue is -1i/2e.  Thus the integral of
+# f(z) = 2i*pi*(-1i/2e) = pi/e along P; the contribution from the
+# semicircle tends to zero as R tends to infinity; thus the integral
+# along the real axis is the whole path integral, or pi/e.
+
+# We can now reproduce this result numerically.  First, choose an R:
+R <- 400
+
+# now define P.  First, the semicircle, u1:
+u1     <- function(x){R*exp(pi*1i*x)}
+u1dash <- function(x){R*pi*1i*exp(pi*1i*x)}
+
+# and now the straight part along the real axis, u2:
+u2     <- function(x){R*(2*x-1)}
+u2dash <- function(x){R*2}
+
+# Better define the function:
+f <- function(z){exp(1i*z)/(1+z^2)}
+
+# OK, now carry out the path integral.  I'll do it explicitly, but note
+# that the contribution from the first integral should be small:
+
+answer.approximate <-
+    integrate.contour(f,u1,u1dash) +
+    integrate.contour(f,u2,u2dash) 
+
+# And compare with the analytical value:
+answer.exact <- pi/exp(1)
+abs(answer.approximate - answer.exact)
+
+
+# Now try the same thing but integrating over a triangle, using
+# integrate.segments().  Use a path P' with base from -R to +R along the
+# real axis, closed by two straight segments, one from +R to 1i*R, the
+# other from 1i*R to -R:
+
+abs(integrate.segments(f,c(-R,R,1i*R))- answer.exact)
+
+
+# Observe how much better one can do by integrating over a big square
+# instead:
+
+abs(integrate.segments(f,c(-R,R,R+1i*R, -R+1i*R))- answer.exact)
+
+
+# Now in the interests of search engine findability, here is an
+# application of Cauchy's integral formula, or Cauchy's formula.  I will
+# use it to find sin(0.8):
+
+u     <- function(x){exp(pi*2i*x)}
+udash <- function(x){pi*2i*exp(pi*2i*x)}
+
+g <- function(z){sin(z)/(z-0.8)}
+
+a <- 1/(2i*pi)*integrate.contour(g,u,udash)
+
+
+abs(a-sin(0.8))
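+
+# A hedged extra check (not from the original examples): residue() is
+# documented above as returning f(z0) by integrating f(z)/(z-z0), so it
+# should recover sin(0.8) directly:
+abs(residue(sin, 0.8, r=0.5) - sin(0.8))   # should be small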
+
+
+}
+\keyword{math}
diff --git a/man/near.match.Rd b/man/near.match.Rd
new file mode 100644
index 0000000..8aacae1
--- /dev/null
+++ b/man/near.match.Rd
@@ -0,0 +1,22 @@
+\name{near.match}
+\alias{near.match}
+\title{Are two vectors close to one another?}
+\description{
+  Returns \code{TRUE} if each element of \code{x} is \dQuote{near} the
+  corresponding element of \code{y}
+}
+\usage{
+near.match(x, y, tol=NULL)
+}
+\arguments{
+  \item{x}{First object}
+  \item{y}{Second object}
+  \item{tol}{Relative tolerance with default NULL meaning to use machine
+    precision} 
+} 
+\author{Robin K. S. Hankin}
+\examples{
+x <- rep(1,6)
+near.match(x, x+rnorm(6)/1e10)
+}
+\keyword{math}
diff --git a/man/newton_raphson.Rd b/man/newton_raphson.Rd
new file mode 100644
index 0000000..ff338d2
--- /dev/null
+++ b/man/newton_raphson.Rd
@@ -0,0 +1,56 @@
+\name{newton_raphson}
+\alias{newton_raphson}
+\alias{Newton_raphson}
+\alias{Newton_Raphson}
+\alias{newton_Raphson}
+\title{Newton Raphson iteration to find roots of equations}
+\description{
+Newton-Raphson iteration to find roots of equations with the emphasis
+on complex functions
+}
+\usage{
+ newton_raphson(initial, f, fdash, maxiter, give=TRUE, tol = .Machine$double.eps)
+}
+\arguments{
+  \item{initial}{Starting guess}
+  \item{f}{Function for which \eqn{f(z)=0} is to be solved for
+    \eqn{z}}
+  \item{fdash}{Derivative of function (note: Cauchy-Riemann conditions
+    assumed)} 
+  \item{maxiter}{Maximum number of iterations attempted}
+  \item{give}{Boolean, with default \code{TRUE} meaning to give
+    output based on that of \code{uniroot()} and \code{FALSE} meaning to
+    return only the estimated root}
+  \item{tol}{Tolerance: iteration stops if \eqn{|f(z)|<tol}{|f(z)|<tol}}
+}
+\details{
+  Bog-standard implementation of the Newton-Raphson algorithm
+}
+\value{
+  If \code{give} is \code{FALSE},
+  returns \eqn{z} with \eqn{|f(z)|<tol}; if \code{TRUE}, returns a list
+  with elements \code{root} (the estimated root), \code{f.root} (the
+  function evaluated at the estimated root; should have small modulus),
+  and \code{iter}, the number of iterations required.
+}
+\note{
+Previous versions of this function used the misspelling
+\dQuote{Rapheson}.
+  }
+\author{Robin K. S. Hankin}
+\examples{
+
+# Find the two square roots of 2+i:
+f <- function(z){z^2-(2+1i)}
+fdash <- function(z){2*z}
+newton_raphson( 1.4+0.3i,f,fdash,maxiter=10)
+newton_raphson(-1.4-0.3i,f,fdash,maxiter=10)
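+
+# A hedged illustration of the 'give' argument documented above: with
+# give=FALSE only the estimated root is returned:
+newton_raphson(1.4+0.3i,f,fdash,maxiter=10,give=FALSE)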
+
+# Now find the three cube roots of unity:
+g <- function(z){z^3-1}
+gdash <- function(z){3*z^2}
+newton_raphson(-0.5+1i,g,gdash,maxiter=10)
+newton_raphson(-0.5-1i,g,gdash,maxiter=10)
+newton_raphson(+0.5+0i,g,gdash,maxiter=10)
+}
+\keyword{math}
diff --git a/man/nome.Rd b/man/nome.Rd
new file mode 100644
index 0000000..a4b133b
--- /dev/null
+++ b/man/nome.Rd
@@ -0,0 +1,28 @@
+\name{nome}
+\alias{nome}
+\alias{nome.k}
+\title{Nome in terms of m or k}
+\description{
+  Calculates the nome in terms of either \eqn{m} (\code{nome()})
+  or \eqn{k} (\code{nome.k()}).
+}
+\usage{
+nome(m)
+nome.k(k)
+}
+\arguments{
+  \item{m}{Real parameter}
+  \item{k}{Real modulus, with \eqn{m=k^2}{m=k^2}}
+}
+\author{Robin K. S. Hankin}
+\note{
+  The nome is defined as \eqn{q=e^{-\pi K'/K}}{q=exp(-pi*K'/K)}, where
+  \eqn{K} and \eqn{iK'} are the quarter periods (see page 576 of
+  AMS-55).  These are calculated using function \code{K.fun()}. 
+}
+\seealso{\code{\link{K.fun}}} 
+\examples{
+nome(0.09)  # AMS-55 give 0.00589414 in example 7 on page 581
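+
+# A hedged consistency check (not from AMS-55): since m=k^2, the
+# following should be zero:
+nome.k(0.3) - nome(0.09)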
+}
+
+\keyword{math}
diff --git a/man/p1.tau.Rd b/man/p1.tau.Rd
new file mode 100644
index 0000000..d2b8d66
--- /dev/null
+++ b/man/p1.tau.Rd
@@ -0,0 +1,34 @@
+\name{p1.tau}
+\alias{p1.tau}
+\title{Does the right thing when calling g2.fun() and g3.fun()}
+\description{
+  Takes vectors and
+  interprets them appropriately for input to \code{g2.fun()} and
+  \code{g3.fun()}.  Not really intended for the end user.
+}
+\usage{
+p1.tau(b)
+}
+\arguments{
+  \item{b}{Vector of periods}
+}
+\details{
+  If \code{b} is of length two, interpret the elements as
+  \eqn{\omega_1}{omega1} and \eqn{\omega_2}{omega2} respectively.
+
+  If a two-column matrix, interpret the columns as
+  \eqn{\omega_1}{omega1} and \eqn{\omega_2}{omega2} respectively.
+
+  Otherwise, interpret as a vector of
+  \eqn{\tau=\omega_1/\omega_2}{tau=omega1/omega2}.
+}
+\value{
+  Returns a two-component list:
+  \item{p1}{First period}
+  \item{tau}{Period ratio}
+}
+\author{Robin K. S. Hankin}
+\examples{
+ p1.tau(c(1+1i,1.1+23.123i))
+}
+\keyword{math}
diff --git a/man/parameters.Rd b/man/parameters.Rd
new file mode 100644
index 0000000..675be4f
--- /dev/null
+++ b/man/parameters.Rd
@@ -0,0 +1,103 @@
+\name{parameters}
+\alias{parameters}
+\alias{e18.7.4}
+\alias{e18.7.5}
+\alias{e18.7.7}
+\alias{e18.3.5}
+\alias{e18.3.3}
+\alias{e18.3.37}
+\alias{e18.3.38}
+\alias{e18.3.39}
+\title{Parameters for Weierstrass's P function}
+\description{
+  Calculates the invariants \eqn{g_2}{g2} and \eqn{g_3}{g3},
+  the e-values \eqn{e_1,e_2,e_3}{e1,e2,e3}, and the half periods
+  \eqn{\omega_1,\omega_2}{omega1, omega2}, from any one of them.
+}
+\usage{
+parameters(Omega=NULL, g=NULL, description=NULL)
+}
+\arguments{
+  \item{Omega}{Vector of length two, containing the \strong{half
+      periods} \eqn{(\omega_1,\omega_2)}{(omega1,omega2)}}
+  \item{g}{Vector of length two:
+    \eqn{(g_2,g_3)}{(g2,g3)}}
+  \item{description}{string containing \dQuote{equianharmonic},
+    \dQuote{lemniscatic}, or \dQuote{pseudolemniscatic}, to specify one
+    of A and S's special cases}    
+}
+\value{
+  Returns a list with the following items:
+  
+  \item{Omega}{A complex vector of length 2 giving the fundamental half
+    periods \eqn{\omega_1}{omega1} and \eqn{\omega_2}{omega2}.  Notation
+    follows Chandrasekharan: half period
+    \eqn{\omega_1}{omega1} is 0.5 times a (nontrivial) period of minimal
+    modulus, and \eqn{\omega_2}{omega2} is 0.5 times a period of smallest
+    modulus having the property \eqn{\omega_2/\omega_1}{omega2/omega1}
+    not real.
+    
+    The relevant periods are made unique by the further requirement that
+    \eqn{Re(\omega_1)>0}{Re(omega1)>0}, and 
+    \eqn{Im(\omega_2)>0}{Im(omega2)>0}; but note that this
+    often results in sign changes when considering cases on boundaries
+    (such as real \eqn{g_2}{g2} and \eqn{g_3}{g3}). 
+    
+    \strong{Note} Different definitions exist for \eqn{\omega_3}{omega3}!
+    A and S use \eqn{\omega_3=\omega_2-\omega_1}{omega3=omega2-omega1},
+    while Whittaker and Watson (eg, page 443), and Mathematica, have
+    \eqn{\omega_1+\omega_2+\omega_3=0}{omega1+omega2+omega3=0}
+    }
+
+    \item{q}{The nome.  Here,
+      \eqn{q=e^{\pi i\omega_2/\omega_1}}{q=exp(pi*i*omega2/omega1)}.}
+    \item{g}{Complex vector of length 2 holding the invariants}
+    \item{e}{Complex vector of length 3.  Here \eqn{e_1}{e1}, \eqn{e_2}{e2},
+      and \eqn{e_3}{e3} are defined by
+      \deqn{e_1=\wp(\omega_1/2),\qquad e_2=\wp(\omega_2/2),\qquad
+	e_3=\wp(\omega_3/2)}{e1=P(omega1/2), e2=P(omega2/2),
+	e3=P(omega3/2)}
+    where \eqn{\omega_3}{omega3} is defined by
+    \eqn{\omega_1+\omega_2+\omega_3=0}{omega1+omega2+omega3=0}.
+    
+    Note that the \eqn{e}s are also defined as the three roots of
+    \eqn{4x^3-g_2x-g_3=0}{4*x^3-g2*x-g3=0}; but this method cannot be
+    used in isolation because the roots may be returned in the wrong order.
+    \item{Delta}{The quantity \eqn{g_2^3-27g_3^2}{g2^3-27*g3^2}, often
+      denoted \eqn{\Delta}{Greek capital Delta}}
+    \item{Eta}{Complex vector of length 3 often denoted
+      \eqn{\eta}{by the greek letter eta}.  Here
+      \eqn{\eta=(\eta_1,\eta_2,\eta_3)}{eta=(eta_1,eta_2,eta_3)} are defined
+      in terms of the Weierstrass zeta function with
+      \eqn{\eta_i=\zeta(\omega_i)}{eta_i=zeta(omega_i)} for \eqn{i=1,2,3}.
+      
+      Note that the name of this element is capitalized to avoid confusion 
+      with function \code{eta()}}
+    \item{is.AnS}{Boolean, with \code{TRUE} corresponding to real
+      invariants, as per Abramowitz and Stegun}
+    \item{given}{character string indicating which parameter was supplied.
+      Currently, one of \dQuote{\code{o}} (omega), or  \dQuote{\code{g}}
+      (invariants)}
+  }
+\author{Robin K. S. Hankin}
+\examples{
+ ## Example 6, p665, LHS
+ parameters(g=c(10,2+0i))
+
+
+ ## Example 7, p665, RHS
+ a <- parameters(g=c(7,6)) ;  attach(a)
+ c(omega2=Omega[1],omega2dash=Omega[1]+Omega[2]*2)
+
+
+  ## verify 18.3.37:
+  Eta[2]*Omega[1]-Eta[1]*Omega[2]   #should be close to pi*1i/2
+
+
+## from Omega to g and back;
+## following should be equivalent to c(1,1i):
+ parameters(g=parameters(Omega=c(1,1i))$g)$Omega 
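+
+ ## A hedged check (not in the original examples), using the list
+ ## elements "e" and "g" documented above: the e's should be roots of
+ ## the cubic 4*x^3 - g2*x - g3 = 0:
+ pp <- parameters(g=c(10,2+0i))
+ max(abs(4*pp$e^3 - pp$g[1]*pp$e - pp$g[2]))   ## should be small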
+
+
+}
+\keyword{math}
diff --git a/man/pari.Rd b/man/pari.Rd
new file mode 100644
index 0000000..d65fef6
--- /dev/null
+++ b/man/pari.Rd
@@ -0,0 +1,53 @@
+\name{pari}
+\alias{pari}
+\alias{PARI}
+\alias{P.pari}
+\alias{gp}
+\alias{GP}
+\alias{Gp}
+\title{Wrappers for PARI functions}
+\description{
+Wrappers for the three elliptic functions of PARI
+}
+\usage{
+P.pari(z,Omega,pari.fun="ellwp",numerical=TRUE)
+}
+\arguments{
+  \item{z}{Complex argument}
+  \item{Omega}{Half periods}
+  \item{pari.fun}{String giving the name of the function passed to
+    PARI.  Values of \code{ellwp}, \code{ellsigma}, and \code{ellzeta},
+    are acceptable here for the Weierstrass \eqn{\wp}{P} function, the
+    \eqn{\sigma}{sigma} function, and the \eqn{\zeta}{zeta} function
+    respectively}   
+  \item{numerical}{Boolean with default \code{TRUE} meaning to return
+    the complex value returned by PARI, and \code{FALSE} meaning to
+    return the ascii string returned by PARI}
+}
+\details{
+  This function calls PARI via an R \code{system()} call.
+}
+\value{
+  Returns an object with the same attributes as \code{z}.
+}
+\references{\url{http://www.parigp-home.de/}}
+\author{Robin K. S. Hankin}
+\note{
+Function translates input into, for example,
+\dQuote{\code{ellwp([1+1*I,2+3*I],1.111+5.1132*I)}} and pipes this string
+directly into \code{gp}.
+
+The PARI system clearly has more powerful syntax than the basic version
+that I'm using here, but I can't (for example) figure out how to
+vectorize any of the calls.
+}
+\examples{
+\dontrun{ # this is in a dontrun environment because it requires pari/gp
+z  <- seq(from=1,to=3+2i,len=34)
+p <- c(1,1i)
+plot(abs(P.pari(z=z,Omega=p) - P(z=z,Omega=p)))
+plot(zeta(z=z,params=parameters(Omega=p))- P.pari(z=z,Omega=c(p),pari.fun="ellzeta"))
+
+}
+}
+\keyword{math}
diff --git a/man/sn.Rd b/man/sn.Rd
new file mode 100644
index 0000000..5658430
--- /dev/null
+++ b/man/sn.Rd
@@ -0,0 +1,113 @@
+\name{sn}
+\alias{ss}
+\alias{sc}
+\alias{sn}
+\alias{sd}
+\alias{cs}
+\alias{cc}
+\alias{cn}
+\alias{cd}
+\alias{ns}
+\alias{nc}
+\alias{nn}
+\alias{nd}
+\alias{ds}
+\alias{dc}
+\alias{dn}
+\alias{dd}
+\alias{e16.36.3}
+\concept{Jacobi elliptic functions}
+\concept{Jacobi's elliptic functions}
+\concept{Jacobian elliptic functions}
+\title{Jacobi form of the elliptic functions}
+\description{
+  Jacobian elliptic functions
+}
+\usage{
+ss(u,m, ...)
+sc(u,m, ...)
+sn(u,m, ...)
+sd(u,m, ...)
+cs(u,m, ...)
+cc(u,m, ...)
+cn(u,m, ...)
+cd(u,m, ...)
+ns(u,m, ...)
+nc(u,m, ...)
+nn(u,m, ...)
+nd(u,m, ...)
+ds(u,m, ...)
+dc(u,m, ...)
+dn(u,m, ...)
+dd(u,m, ...)
+}
+
+\arguments{
+  \item{u}{Complex argument}
+  \item{m}{Parameter}
+  \item{...}{Extra arguments, such as \code{maxiter}, passed to
+    \code{theta.?()}}
+}
+\details{
+All sixteen Jacobi elliptic functions. 
+}
+\references{
+  M. Abramowitz and I. A. Stegun 1965.  \emph{Handbook of mathematical
+    functions}. New York: Dover
+}
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{theta}}}
+\examples{
+
+#Example 1, p579:
+nc(1.9965,m=0.64)
+# (some problem here)
+
+# Example 2, p579:
+dn(0.20,0.19)
+
+# Example 3, p579:
+dn(0.2,0.81)
+
+# Example 4, p580:
+cn(0.2,0.81)
+
+# Example 5, p580:
+dc(0.672,0.36)
+
+# Example 6, p580:
+Theta(0.6,m=0.36)
+
+# Example 7, p581:
+cs(0.53601,0.09)
+
+# Example 8, p581:
+sn(0.61802,0.5)
+
+#Example 9, p581:
+sn(0.61802,m=0.5)
+
+#Example 11, p581:
+cs(0.99391,m=0.5)
+# (should be 0.75 exactly)
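+
+# A hedged identity check (not an AMS-55 example): sn^2 + cn^2 = 1
+# should hold to numerical accuracy for complex argument too:
+u <- 1+0.6i
+abs(sn(u,m=0.3)^2 + cn(u,m=0.3)^2 - 1)   # should be small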
+
+#and now a pretty picture:
+
+n <- 300
+K <- K.fun(1/2)
+f <- function(z){1i*log((z-1.7+3i)*(z-1.7-3i)/(z+1-0.3i)/(z+1+0.3i))}
+# f <- function(z){log((z-1.7+3i)/(z+1.7+3i)*(z+1-0.3i)/(z-1-0.3i))}
+x <- seq(from=-K,to=K,len=n)
+y <- seq(from=0,to=K,len=n)
+z <- outer(x,1i*y,"+")
+
+view(x, y, f(sn(z,m=1/2)), nlevels=44, imag.contour=TRUE,
+     real.cont=TRUE, code=1, drawlabels=FALSE,
+     main="Potential flow in a rectangle",axes=FALSE,xlab="",ylab="")
+rect(-K,0,K,K,lwd=3)
+
+
+
+
+}
+\keyword{math}
diff --git a/man/sqrti.Rd b/man/sqrti.Rd
new file mode 100644
index 0000000..324c745
--- /dev/null
+++ b/man/sqrti.Rd
@@ -0,0 +1,20 @@
+\name{sqrti}
+\alias{sqrti}
+\title{Generalized square root}
+\description{
+Square root wrapper that keeps answer real if possible, coerces to
+complex if not.
+}
+\usage{
+sqrti(x)
+}
+\arguments{
+  \item{x}{Vector to return square root of}
+}
+\author{Robin K. S. Hankin}
+\examples{
+sqrti(1:10)  #real
+sqrti(-10:10) #coerced to complex (compare sqrt(-10:10))
+sqrti(1i+1:10) #complex anyway
+}
+\keyword{math}
diff --git a/man/theta.Rd b/man/theta.Rd
new file mode 100644
index 0000000..ac89e7a
--- /dev/null
+++ b/man/theta.Rd
@@ -0,0 +1,92 @@
+\name{theta}
+\alias{theta}
+\alias{theta1}
+\alias{theta2}
+\alias{theta3}
+\alias{theta4}
+\alias{e16.27.1}
+\alias{e16.27.2}
+\alias{e16.27.3}
+\alias{e16.27.4}
+\alias{theta.00}
+\alias{theta.01}
+\alias{theta.10}
+\alias{theta.11}
+\alias{Theta}
+\alias{Theta1}
+\alias{H}
+\alias{H1}
+\alias{e16.31.1}
+\alias{e16.31.2}
+\alias{e16.31.3}
+\alias{e16.31.4}
+\title{Jacobi theta functions 1-4}
+\description{
+Computes Jacobi's four theta functions for complex \eqn{z} in terms
+of the parameter \eqn{m} or \eqn{q}.
+}
+\usage{
+theta1  (z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta2  (z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta3  (z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta4  (z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta.00(z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta.01(z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta.10(z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+theta.11(z, ignore=NULL, m=NULL, q=NULL, give.n=FALSE, maxiter=30)
+Theta (u, m, ...)
+Theta1(u, m, ...)
+H (u, m, ...)
+H1(u, m, ...)
+}
+\arguments{
+  \item{z,u}{Complex argument of function}
+  \item{ignore}{Dummy variable whose intention is to force the user to
+    name the second argument either \code{m} or \code{q}.}
+  \item{m}{The parameter \eqn{m}, which does not seem to have a name in
+    AMS-55; the variable is introduced in section 16.1, p569}
+  \item{q}{The nome \eqn{q}, defined in section 16.27, p576}
+  \item{give.n}{Boolean with default \code{FALSE} meaning to return the
+    function evaluation, and \code{TRUE} meaning to return a two element
+    list, with first element the function evaluation, and second element
+    the number of iterations used}
+  \item{maxiter}{Maximum number of iterations used.  Note that the
+    series generally converge very quickly}
+  \item{...}{In functions that take it, extra arguments passed to
+    \code{theta1()} et seq; notably, \code{maxiter}}
+}
+\details{
+  Should have a \code{tol} argument.
+
+  Functions \code{theta.00()} et seq are just wrappers for
+  \code{theta1()} et seq, following Whittaker and Watson's terminology
+  on p487; the notation does not appear in Abramowitz and Stegun.
+  \itemize{
+    \item \code{theta.11() = theta1()}
+    \item \code{theta.10() = theta2()}
+    \item \code{theta.00() = theta3()}
+    \item \code{theta.01() = theta4()}
+    }
+  
+}
+\value{
+  Returns a complex-valued object with the same attributes as either
+  \code{z}, or (\code{m} or \code{q}), whichever wasn't recycled.
+}
+\references{
+  M. Abramowitz and I. A. Stegun 1965.  \emph{Handbook of mathematical
+  functions}.  New York: Dover
+}  
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{theta.neville}}}
+\examples{
+
+m <- 0.5
+derivative <- function(small){(theta1(small,m=m)-theta1(0,m=m))/small}
+right.hand.side1 <-  theta2(0,m=m)*theta3(0,m=m)*theta4(0,m=m)
+right.hand.side2 <-  theta1.dash.zero(m)
+
+derivative(1e-5)-right.hand.side1   #should be zero
+derivative(1e-5)-right.hand.side2   #should be zero
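+
+# A hedged extra check (not from the original examples): Jacobi's
+# identity theta2(0)^4 + theta4(0)^4 = theta3(0)^4 should hold for the
+# same m as above:
+abs(theta2(0,m=m)^4 + theta4(0,m=m)^4 - theta3(0,m=m)^4)  # should be small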
+}
+\keyword{array}
diff --git a/man/theta.neville.Rd b/man/theta.neville.Rd
new file mode 100644
index 0000000..b276e47
--- /dev/null
+++ b/man/theta.neville.Rd
@@ -0,0 +1,77 @@
+\name{theta.neville}
+\alias{theta.neville}
+\alias{theta.s}
+\alias{theta.c}
+\alias{theta.d}
+\alias{theta.n}
+\alias{e16.36.6}
+\alias{e16.36.6a}
+\alias{e16.36.6b}
+\alias{e16.36.7}
+\alias{e16.36.7a}
+\alias{e16.36.7b}
+\alias{e16.37.1}
+\alias{e16.37.2}
+\alias{e16.37.3}
+\alias{e16.37.4}
+\alias{e16.38.1}
+\alias{e16.38.2}
+\alias{e16.38.3}
+\alias{e16.38.4}
+\concept{Neville's theta functions}
+\title{Neville's form for the theta functions}
+\description{
+Neville's notation for theta functions as per section 16.36 of
+Abramowitz and Stegun.
+}
+\usage{
+theta.s(u, m, method = "16.36.6", ...)
+theta.c(u, m, method = "16.36.6", ...)
+theta.d(u, m, method = "16.36.7", ...)
+theta.n(u, m, method = "16.36.7", ...)
+}
+
+\arguments{
+  \item{u}{Primary complex argument}
+  \item{m}{Real parameter}
+  \item{method}{Character string corresponding to A and S's equation
+    numbering scheme}
+  \item{...}{Extra arguments passed to the method function, such as
+    \code{maxiter}}
+  }
+\references{
+  M. Abramowitz and I. A. Stegun 1965.  \emph{Handbook of mathematical
+  functions}. New York: Dover
+}
+\author{Robin K. S. Hankin}
+\examples{
+#Figure 16.4.
+m <- 0.5
+K <- K.fun(m)
+Kdash <- K.fun(1-m)
+x <- seq(from=0,to=4*K,len=100)
+plot  (x/K,theta.s(x,m=m),type="l",lty=1,main="Figure 16.4, p578")
+points(x/K,theta.n(x,m=m),type="l",lty=2)
+points(x/K,theta.c(x,m=m),type="l",lty=3)
+points(x/K,theta.d(x,m=m),type="l",lty=4)
+abline(0,0)
+
+
+
+#plot a graph of something that should be zero:
+ x <- seq(from=-4,to=4,len=55)
+ plot(x,(e16.37.1(x,0.5)-theta.s(x,0.5)),pch="+",main="error: note vertical scale")
+
+#now table 16.1 on page 582 et seq:
+ alpha <- 85
+ m <- sin(alpha*pi/180)^2
+## K <- ellint_Kcomp(sqrt(m))
+ K <- K.fun(m)
+ u <- K/90*5*(0:18)
+ u.deg <- round(u/K*90)
+ cbind(u.deg,"85"=theta.s(u,m))      # p582, last col. 
+ cbind(u.deg,"85"=theta.n(u,m))      # p583, last col. 
+
+
+}
+\keyword{math}
diff --git a/man/theta1.dash.zero.Rd b/man/theta1.dash.zero.Rd
new file mode 100644
index 0000000..b0af228
--- /dev/null
+++ b/man/theta1.dash.zero.Rd
@@ -0,0 +1,31 @@
+\name{theta1.dash.zero}
+\alias{theta1.dash.zero}
+\alias{theta1.dash.zero.q}
+\alias{e16.28.6}
+\title{Derivative of theta1}
+\description{
+  Calculates \eqn{\theta_1'(0)}{theta1'(0)} as a function of either
+  \eqn{m} or the nome \eqn{q}
+}
+\usage{
+theta1.dash.zero(m, ...)
+theta1.dash.zero.q(q, ...)
+}
+
+\arguments{
+  \item{m}{Real parameter}
+  \item{q}{Real parameter (the nome)}
+  \item{...}{Extra arguments passed to \code{theta1()} et seq, notably
+    \code{maxiter}}
+}
+\author{Robin K. S. Hankin}
+\examples{
+#Now, try and get 16.28.6, p576: theta1dash=theta2*theta3*theta4:
+
+m <- 0.5
+derivative <- function(small){(theta1(small,m=m)-theta1(0,m=m))/small}
+right.hand.side <-  theta2(0,m=m)*theta3(0,m=m)*theta4(0,m=m)
+derivative(1e-7)-right.hand.side
+
+}
+\keyword{math}
diff --git a/man/theta1dash.Rd b/man/theta1dash.Rd
new file mode 100644
index 0000000..98d4546
--- /dev/null
+++ b/man/theta1dash.Rd
@@ -0,0 +1,44 @@
+\name{theta1dash}
+\alias{theta1dash}
+\alias{theta1dashdash}
+\alias{theta1dashdashdash}
+\title{Derivatives of theta functions}
+\description{
+First, second, and third derivatives of the theta functions
+}
+\usage{
+theta1dash(z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, maxiter = 30)
+theta1dashdash(z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, maxiter = 30)
+theta1dashdashdash(z, ignore = NULL, m = NULL, q = NULL, give.n = FALSE, maxiter = 30)
+}
+\arguments{
+  \item{z}{Primary complex argument}
+  \item{ignore}{Dummy argument to force the user to name the next
+    argument either \code{m} or \code{q}}
+  \item{m}{m as documented in \code{theta1()}}
+  \item{q}{q as documented in \code{theta1()}}
+    \item{give.n}{Boolean with default \code{FALSE} meaning to return the
+    function evaluation, and \code{TRUE} meaning to return a two element
+    list, with first element the function evaluation, and second element
+    the number of iterations used}
+  \item{maxiter}{Maximum number of iterations}
+}
+\details{
+  Uses direct expansion as for \code{theta1()} et seq
+}
+\references{
+   M. Abramowitz and I. A. Stegun 1965. \emph{Handbook of Mathematical
+     Functions.}  New York, Dover
+   }
+\author{Robin K. S. Hankin}
+\seealso{\code{\link{theta}}}
+\examples{
+m <- 0.3+0.31i
+z <- seq(from=1,to=2+1i,len=7)
+delta <- 0.001
+deriv.numer <- (theta1dashdash(z=z+delta,m=m) - theta1dashdash(z=z,m=m))/delta
+deriv.exact <- theta1dashdashdash(z=z+delta/2,m=m)
+abs(deriv.numer-deriv.exact)
+
+}
+\keyword{math}
diff --git a/man/unimodular.Rd b/man/unimodular.Rd
new file mode 100644
index 0000000..7b36606
--- /dev/null
+++ b/man/unimodular.Rd
@@ -0,0 +1,47 @@
+\name{unimodular}
+\alias{unimodular}
+\alias{unimodularity}
+\title{Unimodular matrices}
+\description{
+Systematically generates unimodular matrices; numerical verification of
+a function's unimodularity
+}
+\usage{
+unimodular(n)
+unimodularity(n,o, FUN, ...)
+}
+\arguments{
+  \item{n}{Maximum size of entries of matrices}
+  \item{o}{Two element vector}
+  \item{FUN}{Function whose unimodularity is to be checked}
+  \item{...}{Extra arguments passed to \code{FUN}}
+}
+\details{
+  Here, a \sQuote{unimodular} matrix is of size \eqn{2\times 2}{2x2},
+  with integer entries and a determinant of unity.
+}
+\value{
+  Function \code{unimodular()} returns an array \code{a} of dimension
+  \code{c(2,2,u)} (where \code{u} is a complicated function of \code{n}).
+  Thus 3-slices of \code{a} (that is, \code{a[,,i]}) are unimodular.
+
+  Function \code{unimodularity()} returns the result of applying
+  \code{FUN()} to the unimodular transformations of \code{o}.  The
+  function returns a vector of length \code{dim(unimodular(n))[3]}; if
+  \code{FUN()} is invariant under unimodular transformations and
+  roundoff is neglected, all elements of the vector should be identical.
+}
+\author{Robin K. S. Hankin}
+\note{In function \code{as.primitive()}, a \sQuote{unimodular} matrix
+  may have determinant minus one.
+}
+\seealso{\code{\link{as.primitive}}}
+\examples{
+unimodular(3)
+
+o <- c(1,1i)
+plot(abs(unimodularity(3,o,FUN=g2.fun,maxiter=100)-g2.fun(o)))
+
+
+}
+\keyword{array}
diff --git a/man/view.Rd b/man/view.Rd
new file mode 100644
index 0000000..feba477
--- /dev/null
+++ b/man/view.Rd
@@ -0,0 +1,123 @@
+\name{view}
+\alias{view}
+\concept{Thaller}
+\title{Visualization of complex functions}
+\description{
+Visualization of complex functions using colourmaps and contours
+}
+\usage{
+view(x, y, z, scheme = 0, real.contour = TRUE, imag.contour = real.contour,
+default = 0, col="black", r0=1, power=1, show.scheme=FALSE, ...)
+}
+\arguments{
+  \item{x,y}{Vectors showing real and imaginary components of complex
+    plane; same functionality as  arguments to \code{image()}}
+  \item{z}{Matrix of complex values to be visualized}
+  \item{scheme}{Visualization scheme to be used.  A numeric value is
+    interpreted as one of the (numbered) provided schemes; see source
+    code for details, as I add new schemes from time to time and the
+    code would in any case dominate anything written here.
+
+    A default of zero corresponds to Thaller (1998): see references.  
+    For no colour (ie a white background), set \code{scheme} to a
+    negative number.
+
+    If \code{scheme} does not correspond to a built-in function, the
+    \code{switch()} statement \dQuote{drops through} and provides a
+    white background (use this to show just real or imaginary contours;
+    a value of \eqn{-1} will always give this behaviour)
+
+    If not numeric, \code{scheme} is interpreted as a function that
+    produces a colour; see examples section below.  See details section
+    for some tools that make writing such functions easier}
+  \item{real.contour,imag.contour}{Boolean with default \code{TRUE}
+    meaning to draw contours of constant \eqn{Re(z)} (resp: \eqn{Im(z)})
+    and \code{FALSE} meaning not to draw them}
+  \item{default}{Complex value to be assumed for colouration, if
+    \code{z} takes \code{NA} or infinite values; defaults to zero.
+    Set to \code{NA} for no substitution (ie plot \code{z} \dQuote{as is});
+    usually a bad idea}
+  \item{col}{Colour (sent to \code{contour()})}
+  \item{r0}{If \code{scheme=0}, radius of Riemann sphere as used by
+    Thaller}
+  \item{power}{Defines a slight generalization of Thaller's scheme.
+    Use high values to emphasize areas of high modulus (white) and low
+    modulus (black); use low values to emphasize the argument over the
+    whole of the function's domain.
+
+    This argument is also applied to some of the other schemes where it
+    makes sense}
+    \item{show.scheme}{Boolean, with default \code{FALSE} meaning to
+    perform as advertised and visualize a complex function; and
+    \code{TRUE} meaning to return the function corresponding to the
+    value of argument \code{scheme}}
+  \item{\dots}{Extra arguments passed to \code{image()} and \code{contour()}}
+}
+\details{
+  The examples given for different values of \code{scheme} are intended
+  as examples only: the user is encouraged to experiment by passing
+  homemade colour schemes (and indeed to pass such schemes to the
+  author).
+
+  Scheme 0 implements the ideas of Thaller: the complex plane is mapped
+  to the Riemann sphere, which is coded with the North pole white
+  (indicating a pole) and the South Pole black (indicating a zero).  The
+  equator (that is, complex numbers of modulus \code{r0}) maps to
+  colours of maximal saturation.
+
+  Function \code{view()} includes several tools that simplify the
+  creation of suitable functions for passing to \code{scheme}.
+
+  These include:
+  \describe{
+  \item{\code{breakup()}:}{Breaks up a continuous map:
+    \code{function(x){ifelse(x>1/2,3/2-x,1/2-x)}}}
+  \item{\code{g()}:}{maps positive real to \eqn{[0,1]}:
+    \code{function(x){0.5+atan(x)/pi}}}
+  \item{\code{scale()}:}{scales range to \eqn{[0,1]}:
+    \code{function(x){(x-min(x))/(max(x)-min(x))}}}
+  \item{\code{wrap()}:}{wraps phase to \eqn{[0,1]}:
+    \code{function(x){1/2+x/(2*pi)}}}
+  }
+}
+\author{Robin K. S. Hankin}
+\note{
+  Additional ellipsis arguments are given to both \code{image()} and
+  \code{contour()} (typically, \code{nlevels}).  The resulting
+  \code{warning()} from one or other function is suppressed by
+  \code{suppressWarnings()}.
+}  
+\references{
+  B. Thaller 1998. \emph{Visualization of complex functions}, The
+  Mathematica Journal, 7(2):163--180
+  }
+\examples{
+n <- 100
+x <- seq(from=-4,to=4,len=n)
+y <- x
+z <- outer(x,1i*y,"+")
+view(x,y,limit(1/z),scheme=2)
+view(x,y,limit(1/z),scheme=18)
+
+
+view(x,y,limit(1/z+1/(z-1-1i)^2),scheme=5)
+view(x,y,limit(1/z+1/(z-1-1i)^2),scheme=17)
+
+view(x,y,log(0.4+0.7i+log(z/2)^2),main="Some interesting cut lines")
+
+
+view(x,y,z^2,scheme=15,main="try finer resolution")
+view(x,y,sn(z,m=1/2+0.3i),scheme=6,nlevels=33,drawlabels=FALSE)
+
+view(x,y,limit(P(z,c(1+2.1i,1.3-3.2i))),scheme=2,nlevels=6,drawlabels=FALSE)
+view(x,y,limit(Pdash(z,c(0,1))),scheme=6,nlevels=7,drawlabels=FALSE)
+view(x,x,limit(zeta(z,c(1+1i,2-3i))),nlevels=6,scheme=4,col="white")
+
+# Now an example with a bespoke colour function:
+
+ fun <- function(z){hcl(h=360*wrap(Arg(z)),c= 100 * (Mod(z) < 1))}
+ view(x,x,limit(zeta(z,c(1+1i,2-3i))),nlevels=6,scheme=fun)
+
+view(scheme=10, show.scheme=TRUE)
+}
+\keyword{math}
diff --git a/tests/aaa.R b/tests/aaa.R
new file mode 100644
index 0000000..f2603c5
--- /dev/null
+++ b/tests/aaa.R
@@ -0,0 +1,207 @@
+require(elliptic)
+require(MASS)
+
+safety.factor <- 100
+"test" <- function(x,abs.error=1e-6){stopifnot(abs(x)<abs.error*safety.factor)}
+
+
+
+# equations 16.20.1 to 16.20.3, Jacobi's imaginary transform:
+u <-  seq(from=1,to=4+1i,len=10)
+m <- 0.1+0.1123312i
+test(sn(1i*u,m=m) - 1i*sc(u,m=1-m))
+test(cn(1i*u,m=m) -    nc(u,m=1-m))
+test(dn(1i*u,m=m) -    dc(u,m=1-m))
+
+
+# eqs 16.28.1-16.28.5, p576:
+test(abs(e16.28.1(z=1:600,m=0.234+0.1i)),abs.error=2e-15)
+test(abs(e16.28.2(z=1:600,m=0.234+0.1i)),abs.error=2e-15)
+test(abs(e16.28.3(z=1:600,m=0.234+0.1i)),abs.error=2e-15)
+test(abs(e16.28.4(z=1:600,m=0.234+0.1i)),abs.error=2e-15)
+
+test(abs(e16.28.5(m=seq(from=-0.234+0.1i, to=0.44-1i,len=100))),abs.error=2e-15)
+
+#Now, try and get 16.28.6, p576: theta1dash=theta2*theta3*theta4:
+m <- 0.5
+derivative <- function(small){(theta1(small,m=m)-theta1(0,m=m))/small}
+right.hand.side <-  theta2(0,m=m)*theta3(0,m=m)*theta4(0,m=m)
+test(derivative(1e-7)-right.hand.side)
+#looks fine.
+
+
+
+
+#now compare e16.37.1 (product form for Neville's theta.s) with
+#eq 16.36.6:
+test( e16.37.1(0.5,0.5) - theta.s(0.5,0.5))
+
+
+
+#now compare   e16.37.2 with eq 16.36.6, part 2:
+test(e16.37.2(0.75,0.5) - theta.c(0.75,0.5))
+
+# An identity of the form pq= pn/qn:
+test(theta.c(0.5,0.5)/theta.d(0.5,0.5)- cd(0.5,0.5),abs.error=1e-9)
+
+
+
+#Now check Laurent series for equianharmonic case, table on p656:
+page.656 <- ck(g=c(0,1),n=12)-as.vector(t(cbind(0,0,c(1/28,1/10192,1/5422144,3/(5*13^2*19*28^4)))))
+test(abs(page.656),abs.error=1e-19)
+
+
+
+# Example 2, p579:
+test(dn(0.20,0.19)-0.996253, abs.error=1e-6)
+
+# Example 3, p579:
+test(dn(0.2,0.81)-0.98406, abs.error=1e-5)
+
+# Example 4, p580:
+test(cn(0.2,0.81)-0.980278, abs.error=1e-6)
+
+# Example 5, p580:
+test(dc(0.672,0.36)-1.174,abs.error=1e-4)
+
+# Example 6, p580:
+test(Theta(0.6,m=0.36)-0.97357,abs.error=1e-5)
+
+# Example 7, p581:
+test(cs(0.5360162,0.09)-1.6918083,abs.error=1e-7)
+
+# Example 8, p581:
+test(sn(0.61802,0.5)-0.56458,abs.error=1e-5)
+
+#Example 9, p581:
+test(sc(0.61802,m=0.5)-0.68402,abs.error=1e-5)
+
+#Example 11, p581:
+test(cs(0.99391,m=0.5)-0.75,abs.error=1e-5)
+
+# Example 8, p666, LHS
+test(P(z=0.07 + 0.1i, g=c(10,2)) - (-22.97450010 - 63.0532328i),abs.error=1e-7)
+
+# Now check sigma() against some Maple arguments:
+test(sigma(1+0.4i,g=c(2+0.3i,1-0.99i)) - (1.006555817+0.3865197102i),abs.error=1e-9)
+test(sigma(10-8i,g=c(1-0.4i,2.1-0.7i))-(-1.033893831e18 + 6.898810975e17i),1e11)
+test(sigma(4,g=c(2,3)) - (-80.74922381),abs.error=1e-7)
+
+#Now verify that g2.fun() and g3.fun() are in fact unimodular:
+o <- c(1,1i)
+ test(abs(unimodularity(7,o,FUN=g2.fun, maxiter=100)-g2.fun(o)),abs.error=1e-9)
+ test(abs(unimodularity(7,o,FUN=g3.fun, maxiter=100)-g3.fun(o)),abs.error=2e-9)
+
+M <- congruence(c(4,9))
+test(abs(g.fun(o) - g.fun(M %*% o,maxiter=840)),2e-13)
+
+# Verify Jacobi's formula numerically:
+test(theta1dash(z=0,q=0.1+0.2i) -  theta1.dash.zero.q(0.1+0.2i),abs.error=3e-16)
+
+#Now verify theta1.dashdash etc:
+
+#d/dz (theta1) == theta1dash:
+m <- 0.3+0.31i
+z <- seq(from=1,to=2+1i,len=7)
+delta <- 0.001
+deriv.numer <- (theta1(z=z+delta,m=m) - theta1(z=z,m=m))/delta
+deriv.exact <- theta1dash(z=z+delta/2,m=m)
+test(deriv.numer-deriv.exact,abs.error=1e-7)
+
+#d/dz (theta1dash) == theta1dashdash:
+deriv.numer <- (theta1dash(z=z+delta,m=m) - theta1dash(z=z,m=m))/delta
+deriv.exact <- theta1dashdash(z=z+delta/2,m=m)
+test(deriv.numer-deriv.exact,abs.error=1e-7)
+
+#d/dz (theta1dashdash) == theta1dashdashdash:
+deriv.numer <- (theta1dashdash(z=z+delta,m=m) - theta1dashdash(z=z,m=m))/delta
+deriv.exact <- theta1dashdashdash(z=z+delta/2,m=m)
+test(deriv.numer-deriv.exact,abs.error=2e-7)
+
+
+# Example 13, page 668, LHS:
+test(sigma.laurent(z=0.4 + 1.3i,g=c(8,4),nmax=8)-(0.278080 + 1.272785i),abs.error=6e-8)
+
+# Example 13, page 668, RHS:
+test(sigma.laurent(z=0.8 + 0.4i,g=c(7,6),nmax=8)-(0.81465765 + 0.38819473i),abs.error=1e-8)
+
+
+# Check  P() against Some Maple outputs (I just made up the arguments):
+test(P(1+0.3i,g=c(1+1i,2-0.33i),give.all.3=TRUE)-(0.8231651984-0.3567903513i),abs.error=1e-10)
+test(P(-4-4i,g=c(0.3123+10i,0.1-0.2222i),give.all.3=TRUE)-(-1.118985985-1.038221043i),abs.error=1e-9)
+test(P(10+2i,g=c(1,4+0i),give.all.3=TRUE)-(2.021264367-0.9875939553i),abs.error=1e-10)
+
+
+# check e18.10.9, p650:
+test(e18.10.9(parameters(g=c(1,3+0.2i))), abs.error=2e-14)
+test(e18.10.9(parameters(g=c(1,3+  0i))), abs.error=1e-14)
+test(e18.10.9(parameters(g=c(1,0.1+0i))), abs.error=1e-14)
+
+
+# check that P'2=4P^3-g2*P-g3:
+g <- c(1.44+0.1i, -0.3+0.99i)
+g2 <- g[1]
+g3 <- g[2]
+u <- parameters(g=g)
+z <- seq(from= 10-14i, to=-10+20i, len=777)
+p <- P(z,g)
+pd <- Pdash(z,g)
+test(4*p^3-g2*p-g3-pd^2, 2e-11)
+
+
+# check that (P')^2 =4(P-e1)(P-e2)(P-e3):
+test(pd^2-4*(p-u$e[1])*(p-u$e[2])*(p-u$e[3]))
+
+
+#now some tests of eta() and eta.series():
+ z <- seq(from=1+1i,to=10+0.6i,len=99)
+test(eta(z)-eta.series(z),abs.error=2e-14)
+test(eta(z+1)-eta(z)*exp(pi*1i/12),1e-14)
+test(eta(1i)-gamma(1/4)/2/pi^(3/4),abs.error=1e-15)
+test(theta3(0,q=exp(pi*1i*z))-eta((z+1)/2)^2/eta(1+z),abs.error=4e-15)
+
+
+#now test J() and lambda() for being unimodular:
+ M <- matrix(c(5,4,16,13),2,2)
+ z <- seq(from=1+1i,to=3+3i,len=10)
+ test(J(z)-J(M %mob% z,maxiter=100),1e-7)
+ test(lambda(z)-lambda(M %mob% z,maxiter=100),1e-12)
+
+# some identities for lambda function:
+ z <- seq(from= -1+0.42i,to=10+7i,len=20)
+ test(lambda(z)-lambda(z+2))
+ test(lambda(z+1)-lambda(z)/(lambda(z)-1))
+
+
+# and one for J():
+test(J(1i+-10:10)-1,abs.error=2e-15)
+
+
+#standard example of divisor function:
+test(divisor(140)-336)
+
+#divisor() is multiplicative:
+test(divisor(11*12)-divisor(11)*divisor(12))
+
+#Euler's generalization of Fermat's little theorem:
+test((2^totient(15)) %%15 - 1)
+
+#totient(p)= p-1 for prime p:
+test(1+totient(primes(100)) - primes(100))
+
+#totient() is multiplicative (for coprime arguments):
+test(totient(23*1:10)-totient(23)*totient(1:10))
+
+#mobius() is multiplicative:
+test(mobius(23*1:10)-mobius(23)*mobius(1:10))
+
+#Numerical verification of Mobius inversion theorem, using f(n)=1.
+
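+# Here sigma_0(n) = divisor(n,k=0) is the number-of-divisors function,
+# which is the Dirichlet convolution of f(n)=1 with itself; Mobius
+# inversion then gives sum_{d|n} mobius(d)*sigma_0(n/d) = 1 for all n.
+# The divisors d of n are generated as products of subsets of the
+# prime factors returned by factorize().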
+mobius.invert <- function(n){
+  f <- factorize(n)
+  d <- unique(apply(f^t(expand.grid(lapply(1:length(f),function(...){0:1}))),2,prod))
+  sum(mobius(d)*divisor(n/d,k=0))
+}
+
+jj <- c(1:10,1000:1030)
+test(sapply(jj,mobius.invert)-1)
diff --git a/vignettes/elliptic.bib b/vignettes/elliptic.bib
new file mode 100644
index 0000000..314e65f
--- /dev/null
+++ b/vignettes/elliptic.bib
@@ -0,0 +1,199 @@
+ at Article{kotus2003,
+  author = 	 {J. Kotus and M. Urba\'{n}ski},
+  title = 	 {Hausdorff {D}imension and {H}ausdorff {M}easures of {J}ulia sets of elliptic functions},
+  journal = 	 {Bulletin of the London Mathematical Society},
+  year = 	 {2003},
+  volume =	 {35},
+  pages =	 {269--275}}
+
+ at Article{erdos2000,
+  author = 	 {P. Erd\"{o}s},
+  title = 	 {Spiraling the {E}arth with {C}. {G}. {J}. {J}acobi},
+  journal = 	 {American Journal of Physics},
+  year = 	 {2000},
+  volume =	 {68},
+  number =	 {10},
+  pages =	 {888--895}}
+
+ at Article{carlson2004,
+  author = 	 {B. C. Carlson},
+  title = 	 {Symmetry in c, d, n of {J}acobian Elliptic Functions},
+  journal = 	 {Journal of Mathematical Analysis and Applications},
+  year = 	 {2004},
+  volume =	 {299},
+  pages =	 {242--253}}
+
+ at Article{vyrdis1999,
+  author = 	 {D. G. Vyrdis and S. D. Panteliou and I. N. Katz},
+  title = 	 {An Inverse Convergence Approach for Arguments of {J}acobian Elliptic Functions},
+  journal = 	 {Computers and Mathematics with Applications},
+  year = 	 {1999},
+  volume =	 {37},
+  pages =	 {21--26}}
+
+ at Article{paszkowski1997,
+  author = 	 {S. Paszkowski},
+  title = 	 {Fast Convergent Quasipower Series for Some Elementary and Special Functions},
+  journal = 	 {Computers and Mathematics with Applications},
+  year = 	 {1997},
+  volume =	 {33},
+  number =       {1/2},
+  pages =	 {181--191}}
+
+ at Article{chow2002,
+  author = 	 {K. W. Chow},
+  title = 	 {A Class of Doubly Periodic Waves for Nonlinear Evolution Equations},
+  journal = 	 {Wave Motion},
+  year = 	 {2002},
+  volume =	 {35},
+  pages =	 {71--90}}
+
+ at Book{whittaker1952,
+  author =	 {E. T. Whittaker and G. N. Watson},
+  title = 	 {A Course of Modern Analysis},
+  publisher = 	 {Cambridge University Press},
+  year = 	 {1952},
+  edition =	 {Fourth}}
+
+
+ at Book{chandrasekharan1985,
+  author =	 {K. Chandrasekharan},
+  title = 	 {Elliptic Functions},
+  publisher = 	 {Springer-Verlag},
+  year = 	 {1985}}
+
+
+ at Article{koopman1991,
+  author = 	 {D. C. Koopman and H. H. Lee},
+  title = 	 {Second-Order Reversible Reactions and Diffusion in a Slab-Like Medium: an Application of the {W}eierstrass Elliptic Pe Function},
+  journal = 	 {Chemical Engineering Science},
+  year = 	 {1991},
+  volume =	 {46},
+  number =	 {4},
+  pages =	 {1165--1177}}
+
+ at Article{johnson2004,
+  author = 	 {E. R. Johnson and {McDonald}, N. Robb},
+  title = 	 {The Motion of a Vortex Near Two Circular Cylinders},
+  journal = 	 {Proceedings of the Royal Society of London A},
+  year = 	 {2004},
+  volume =	 {460},
+  pages =	 {939--954}}
+
+ at Article{johnson2005,
+  author = 	 {E. R. Johnson and {McDonald}, N. Robb},
+  title = 	 {Vortices Near Barriers With Multiple Gaps},
+  journal = 	 {Journal of Fluid Mechanics},
+  year = 	 {2005},
+  volume =	 {531},
+  pages =	 {335--358}}
+
+
+ at Article{kraniotis2002,
+  author = 	 {G. V. Kraniotis and S. B. Whitehouse},
+  title = 	 {General Relativity, the Cosmological Constant and Modular Forms},
+  journal = 	 {Classical and Quantum Gravity},
+  year = 	 {2002},
+  volume =	 {19},
+  pages =	 {5073--5100}}
+
+ at Book{abramowitz1965,
+  author = 	 {M. Abramowitz and I. A. Stegun},
+  title = 	 {Handbook of Mathematical Functions},
+  publisher = 	 {New York: Dover},
+  year = 	 {1965}}
+
+ at Misc{batut2000,
+  author =	 {C. Batut and others},
+  title =	 {User's guide to \proglang{PARI}/\proglang{GP}},
+  howpublished = {Technical reference manual},
+  year =	 {2000},
+  url =	         {http://www.parigp-home.de/}
+}
+
+ at Book{milne1949,
+  author = 	 {L. M. Milne-Thomson},
+  title = 	 {Theoretical Hydrodynamics},
+  publisher = 	 {Macmillan},
+  year = 	 {1949},
+  edition =	 {Second}}
+
+ at Article{thaller1998,
+  author = 	 {B. Thaller},
+  title = 	 {Visualization of Complex Functions},
+  journal = 	 {The Mathematica Journal},
+  year = 	 {1998},
+  volume =	 {7},
+  number =	 {2},
+  pages =	 {163--180}}
+
+ at Book{feynman1966,
+  author = 	 {R. P. Feynman and R. B. Leighton and M. Sands},
+  title = 	 {The {F}eynman Lectures on Physics},
+  publisher = 	 {Addison Wesley},
+  year = 	 {1966},
+  volume = 	 {1}}
+
+ at Misc{weisstein2005,
+  author =	 {E. W. Weisstein},
+  title =	 {Jacobi Elliptic Functions},
+  year =	 {2005},
+  note =	 {from {M}athworld--a {W}olfram web resource; \code{http://mathworld.wolfram.com/JacobiEllipticFunctions.html}}}
+
+ at Article{kennedy2001,
+  author = 	 {M. C. Kennedy and A. O'Hagan},
+  title = 	 {Bayesian Calibration of Computer Models},
+  journal = 	 {Journal of the Royal Statistical Society B},
+  year = 	 {2001},
+  volume =	 {63},
+  number =	 {3},
+  pages =	 {425--464}}
+
+
+ at TechReport{kennedy2001a,
+  author = 	 {M. C. Kennedy and A. O'Hagan},
+  title = 	 {Supplementary Details on Bayesian Calibration of Computer Models},
+  institution =  {University of Sheffield},
+  year = 	 {2001},
+  url =	         {http://www.shef.ac.uk/~st1ao/ps/calsup.ps}
+}
+
+ at Article{hankin2005,
+  author = 	 {R. K. S. Hankin},
+  title = 	 {Introducing {B}{A}{C}{C}{O}, an \proglang{R} package for Bayesian Analysis of Computer Code Output},
+  journal = 	 {Journal of Statistical Software},
+  year = 	 {2005},
+  volume =	 {14},
+  number =	 {16},
+  month =	 {October}
+}
+
+ at Book{edwards1992,
+  author = 	 {A. W. F. Edwards},
+  title = 	 {Likelihood (Expanded Edition)},
+  publisher = 	 {Johns Hopkins},
+  year = 	 {1992}
+}
+
+ at Manual{rcore2005,
+    title = {\proglang{R}: A language and environment for statistical computing},
+    author = {{\proglang{R} Development Core Team}},
+    organization = {\proglang{R} Foundation for Statistical Computing},
+    address = {Vienna, Austria},
+    year = {2005},
+    note = {{ISBN} 3-900051-07-0},
+    url = {http://www.R-project.org},
+  }
+
+
+ at Article{hankin2006,
+  author = 	 {R. K. S. Hankin},
+  title = 	 {Introducing elliptic, an \proglang{R} package for elliptic
+	 and modular functions},
+  journal = 	 {Journal of Statistical Software},
+  year = 	 {2006},
+  volume =	 {15},
+  number =	 {7},
+  month =	 {February}
+}
+
diff --git a/vignettes/ellipticpaper.Rnw b/vignettes/ellipticpaper.Rnw
new file mode 100644
index 0000000..0b9d095
--- /dev/null
+++ b/vignettes/ellipticpaper.Rnw
@@ -0,0 +1,1421 @@
+\documentclass[nojss]{jss}
+
+\usepackage{dsfont}
+\usepackage{bbm}
+\usepackage{amsfonts}
+\usepackage{wasysym}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%% just as usual
+\author{Robin K. S. Hankin}
+\title{Introducing \pkg{elliptic}, an \proglang{R} package for elliptic and
+  modular functions}
+%\VignetteIndexEntry{A vignette for the elliptic package}
+%% for pretty printing and a nice hypersummary also set:
+%% \Plainauthor{Achim Zeileis, Second Author} %% comma-separated
+\Plaintitle{Introducing elliptic, an R package for elliptic and
+  modular functions}
+\Shorttitle{Elliptic functions with \proglang{R}}
+
+%% an abstract and keywords
+\Abstract{
+
+This paper introduces the \pkg{elliptic} package of \proglang{R} routines, for
+numerical calculation of elliptic and related functions.  Elliptic
+functions furnish interesting and instructive examples of many ideas
+of complex analysis, and package \pkg{elliptic} illustrates these
+numerically and visually.  A statistical application in fluid
+mechanics is presented.
+
+An earlier version of this vignette was published as~\citet{hankin2006}. 
+}
+
+\Keywords{Elliptic functions, modular functions, Weierstrass elliptic
+functions, visualization of complex functions}
+
+
+
+
+\Address{
+  Robin K. S. Hankin\\
+  Auckland University of Technology\\
+  2-14 Wakefield Street\\
+  Auckland\\
+  New Zealand\\
+  E-mail: \email{hankin.robin at gmail.com}
+}
+
+
+%% need no \usepackage{Sweave.sty}
+\SweaveOpts{echo=FALSE}
+\begin{document}
+
+
+<<requirepackage,echo=FALSE,print=FALSE>>=
+require(elliptic,quietly=TRUE)
+@ 
+
+
+<<setOverallImageQuality>>=
+n <- 400
+n_BACCO <- 40
+@ 
+
+\section{Introduction}
+
+The elliptic functions crop up here and there in diverse areas of
+applied mathematics such as cosmology~\citep{kraniotis2002}, chemical
+engineering~\citep{koopman1991}, dynamical systems~\citep{kotus2003},
+and quantum mechanics~\citep{chow2002}; here they are applied to fluid
+mechanics~\citep{johnson2004,johnson2005}.  They also provide
+interesting and instructive non-elementary examples of many results in
+complex analysis such as Cauchy's integral theorem and the residue
+theorem.
+
+In this paper I introduce \pkg{elliptic}, a new \proglang{R} package for
+numerical calculation of Weierstrass and Jacobi elliptic functions,
+theta functions and modular functions.  The emphasis is on efficient
+numerical calculation, and informative visualization techniques.
+
+The package is available on CRAN, \url{http://cran.R-project.org/}
+\citep{rcore2005}. 
+
+\section{Elliptic functions}\label{section:introduction}
+
+This section gives a very brief introduction to elliptic functions.
+For more detail and rigorous derivations, the reader is referred to
+the classic literature: the standard reference would
+be~\cite{whittaker1952}.  \cite{chandrasekharan1985} approaches the
+field from a more modern perspective, and \cite{abramowitz1965}
+provide the definitive reference work for the case of real invariants.
+
+A meromorphic function~$f$ is said to be elliptic
+if~$\exists\,\omega_1,\omega_2\in\mathbbm{C}$
+with~$\omega_2/\omega_1\in\mathbbm{C}\backslash\mathbbm{R}$ such that
+
+\begin{equation}
+f(z)=f(z+2m\omega_1+2n\omega_2)
+\end{equation}
+whenever~$f(z)$ is defined and~$m,n\in\mathbbm{Z}$.  Notation in this
+paper is consistent with that of~\citet{abramowitz1965}; $\omega_1$
+and~$\omega_2$ are called the {\em half periods}.  In 1862,
+Weierstrass introduced his~$\wp$ function which is defined as
+\begin{equation}\label{direct.sum}
+\wp(z)=
+\frac{1}{z^2}+
+\sum_{(m,n)\in\mathbbm{Z}^2\atop (m,n)\neq (0,0)}
+\left\{
+   \frac{1}{\left(z-2m\omega_1-2n\omega_2\right)^2}
+  -\frac{1}{\left(  2m\omega_1+2n\omega_2\right)^2}
+\right\}.
+\end{equation}
+The~$\wp$ function is, in a well defined sense, the simplest
+nontrivial elliptic function~\citep{whittaker1952}.  Given this, we
+have a Laurent expansion of the form
+\begin{equation}
+\wp(z)-z^{-2}=\frac{1}{20}g_2z^2+\frac{1}{28}g_3z^4+O(z^6)
+\end{equation}
+with
+\begin{equation}
+g_2=60{\sum}'\frac{1}{\left(2m\omega_1+2n\omega_2\right)^4},
+\qquad
+g_3=140{\sum}'\frac{1}{\left(2m\omega_1+2n\omega_2\right)^6},
+\end{equation}
+where a prime indicates summation over~$\mathbbm{Z}^2$
+excluding~$(m,n)=(0,0)$.  For reasons to be made clear in
+section~\ref{section.unimodularity}, $g_2$ and~$g_3$ are known as the
+{\em invariants}.  Other equivalent forms for~$\wp$ include its
+differential equation
+\begin{equation}\label{P.differential.eqn.definition}
+\left(\frac{d\wp}{dz}\right)^2=4\wp^3-g_2\wp-g_3
+\end{equation}
+and the relation
+\begin{equation}\label{P.integral.definition}
+z=\int_{t=w}^\infty\frac{dt}{\sqrt{4t^3-g_2t-g_3}}
+\end{equation}
+which is equivalent to~$w=\wp(z)$.
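+
+Equation~\ref{P.differential.eqn.definition} provides a convenient
+numerical check on the package (a minimal sketch; \code{P()} and
+\code{Pdash()} compute~$\wp$ and~$\wp'$ respectively, and the result
+should vanish to within numerical error):
+
+\begin{Schunk}
+\begin{Sinput}
+> g <- c(1.1+0.2i, 0.3-0.4i)
+> z <- 0.7+0.3i
+> Pdash(z,g)^2 - (4*P(z,g)^3 - g[1]*P(z,g) - g[2])
+\end{Sinput}
+\end{Schunk}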
+
+Related functions include the zeta function~$\zeta(z)$, defined by 
+\begin{equation}\label{zeta.definition}
+\frac{d\zeta(z)}{dz}=-\wp(z)
+\end{equation}
+and the sigma function~$\sigma(z)$, defined by
+\begin{equation}\label{sigma.definition}
+\frac{d\log\sigma(z)}{dz}=\zeta(z),\qquad{\lim_{\mbox{\tiny $z\longrightarrow
+0$}}}\left[\frac{\sigma(z)}{z}\right]=1
+\end{equation}
+(neither~$\sigma(z)$ nor~$\zeta(z)$ is elliptic).  It may be
+shown\label{zeta.analytic} that~$\zeta(z)$ is analytic except for
+points on the lattice of periods, at which it has simple poles with
+residue~1.  One classic result is due to Legendre:
+if~$\omega_1,\omega_2$ is a pair of basic periods\footnote{A pair of
+basic periods is one that generates the period lattice.  Basic periods
+are not unique as many pairs of periods may generate the same lattice.
+However, there is one pair of basic periods, the {\em fundamental}
+periods that are, in a well-defined sense,
+optimal~\citep{chandrasekharan1985}.},
+with~$\rm{Im}(\omega_2/\omega_1)>0$, then
+\begin{equation}
+\eta_1\omega_2-\eta_2\omega_1=\pi i\label{legendre}
+\end{equation}
+where~$\eta_1=\zeta(\omega_1)$ and~$\eta_2=\zeta(\omega_2)$.
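+
+Equation~\ref{legendre} may be checked numerically with the package
+(a minimal sketch: the half periods are recovered here
+via~\code{parameters()}, and the answer should be~$\pm\pi i$
+according to the ordering of the returned periods):
+
+\begin{Schunk}
+\begin{Sinput}
+> g <- c(1+1i, 2-3i)
+> o <- parameters(g=g)$Omega
+> zeta(o[1],g)*o[2] - zeta(o[2],g)*o[1]
+\end{Sinput}
+\end{Schunk}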
+
+\subsection{Jacobian elliptic functions}
+Jacobi approached the description of elliptic functions from a
+different perspective~\citep{weisstein2005}.  Given~$m=k^2$ and~$m_1$
+with~$m+m_1=1$, Jacobi showed that if
+\[
+u=\int_{t=0}^{\sin\phi}\frac{dt}{\sqrt{(1-t^2)(1-mt^2)}}
+\]
+the functions~${\rm sn}(u,k)$, ${\rm cn}(u,k)$ and~${\rm dn}(u,k)$
+defined by
+\begin{equation}\label{sn.definition}
+{\rm sn} u=\sin\phi,\qquad
+{\rm cn} u=\cos\phi,\qquad
+{\rm dn} u=\sqrt{1-k^2\sin^2\phi}
+\end{equation}
+are elliptic with periods 
+\begin{equation}
+K=\int_{\theta=0}^{\pi/2}\frac{d\theta}{\sqrt{1-m\sin^2\theta}}
+\end{equation}
+and
+\begin{equation}
+iK'=i\int_{\theta=0}^{\pi/2}\frac{d\theta}{\sqrt{1-m_1\sin^2\theta}}.
+\end{equation}
+The Jacobian elliptic functions are encountered in a variety of
+contexts and bear a simple analytical relation with the
+Weierstrass~$\wp$ function.
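+
+For example (a small sketch), the familiar
+identity~${\rm sn}^2u+{\rm cn}^2u=1$ may be checked directly:
+
+\begin{Schunk}
+\begin{Sinput}
+> u <- 0.7+0.2i
+> sn(u, m=0.3)^2 + cn(u, m=0.3)^2
+\end{Sinput}
+\end{Schunk}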
+
+\section{Numerical evaluation and Jacobi's theta functions}
+
+Although equation~\ref{direct.sum} is absolutely convergent, it
+converges too slowly to be of use in practical work, and an alternative
+definition is needed.
+
+Jacobi presented his four theta functions in 1829 and, although they
+have interesting properties in their own right, here they are used to
+provide efficient numerical methods for calculation of the elliptic
+functions above.  They are defined as follows:
+
+\begin{eqnarray}\label{theta.defn}
+\theta_1(z,q)&=&2q^{1/4}\sum_{n=0}^\infty(-1)^nq^{n(n+1)}\sin(2n+1)z\\
+\theta_2(z,q)&=&2q^{1/4}\sum_{n=0}^\infty q^{n(n+1)}\cos(2n+1)z\\
+\theta_3(z,q)&=&1+2\sum_{n=1}^\infty q^{n^2}\cos 2nz\\
+\theta_4(z,q)&=&1+2\sum_{n=1}^\infty(-1)^n q^{n^2}\cos 2nz
+\end{eqnarray}
+It may be seen that, provided~$|q|<1$, the series converges for
+all~$z\in\mathbbm{C}$.  Indeed, the convergence is very rapid: the
+number of correct significant figures goes as the square of the number
+of terms.  It should be noted that there are different notations in
+use, both for the four function names, and for the independent
+variables.
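+
+The rapid convergence is easily seen by truncating the series
+for~$\theta_3$ by hand and comparing with the package's
+\code{theta3()} (a sketch; \code{partial()} is not package code):
+
+\begin{Schunk}
+\begin{Sinput}
+> q <- 0.2+0.3i
+> z <- 1+1i
+> partial <- function(N){1 + 2*sum(q^((1:N)^2)*cos(2*(1:N)*z))}
+> abs(partial(3) - theta3(z, q=q))
+> abs(partial(6) - theta3(z, q=q))
+\end{Sinput}
+\end{Schunk}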
+
+All the functions described in section~\ref{section:introduction} may
+be expressed in terms of the theta functions.  This is the default
+method in \pkg{elliptic}, although alternative algorithms are
+implemented where possible to provide a numerical and notational
+check.
+
+For example, Weierstrass's~$\wp$ function is given by
+\begin{equation}\label{P.in.terms.of.theta}
+\wp(z)= e_1+
+\frac{\pi^2}{4\omega_1^2}\left(
+   \frac{\theta_1'(0,q)\theta_2(v,q)}{\theta_2(0,q)\theta_1(v,q)}
+\right)^2
+\end{equation}
+where~$v=\pi z/(2\omega_1)$, $q=e^{i\pi\omega_2/\omega_1}$,
+and~$e_1=\wp(\omega_1)$; the other functions have similar theta
+function definitions.
+
+<<require_packages, echo=FALSE,print=FALSE>>=
+<<results=hide>>=
+require(elliptic)
+require(emulator)
+require(calibrator)
+@ 
+
+\section[Package ''elliptic'' in use]{Package \pkg{elliptic} in use}
+
+This section shows \pkg{elliptic} being used in a variety of contexts.
+First, a number of numerical verifications of the code are presented;
+then, elliptic and related functions are visualized using the function
+\code{view()}; and finally, the package is used to calculate flows
+occurring in an oceanographic context.
+
+The primary function in package \pkg{elliptic} is~\code{P()}: this
+calculates the Weierstrass~$\wp$ function, and may take named
+arguments that specify either the invariants~\code{g} or half
+periods~\code{Omega}:
+<<simple_usage_of_P,echo=TRUE,print=FALSE>>=
+z <- 1.9+1.8i
+P(z,g=c(1,1i))
+P(z,Omega=c(1,1i))
+@ 
+
+\subsection{Numerical verification}
+
+Work in the field of elliptic functions is very liable to
+mistakes\footnote{\cite{abramowitz1965} state that there is a
+``bewildering'' variety of notations in use; the situation has become
+more confusing in the intervening 40 years.}, and package
+\pkg{elliptic} includes a number of numerical checks to guard against
+notational inexactitude.  These checks generally use the convenient
+(trivial) function \code{maxdiff()} that shows the maximum absolute
+difference between its two arguments:
+<<define_maxdiff,echo=TRUE,print=FALSE>>=
+maxdiff <- function(x,y){max(abs(x-y))}
+@ 
+
+For example, we may compare the output of \code{P()}, which uses
+equation~\ref{P.in.terms.of.theta}, against the straightforward
+Laurent expansion, used by \code{P.laurent()}:
+
+<<laurent,echo=TRUE,print=FALSE>>=
+g <- c(3,2+4i)
+z <- seq(from=1,to=0.4+1i,len=34)
+<<maxdiff_laurent,echo=TRUE,print=TRUE>>=
+maxdiff(P(z,g), P.laurent(z,g))
+@
+
+showing reasonable agreement; note that function \code{P()} uses the
+conceptually distinct theta function formula of
+equation~\ref{P.in.terms.of.theta}.  Package \pkg{elliptic} includes a
+large number of such numerical verification tests in the \code{test}
+suite provided in the package, but perhaps more germane is the
+inclusion of named identities appearing in \cite{abramowitz1965}.  For
+example, consider function~\code{e18.10.9()}, named for the equation
+number of the identity appearing on page 650.  This function returns
+the difference between the (algebraically identical) left and right
+hand side of three grouped identities:
+\begin{eqnarray}
+  12\omega_1^2e_1 &=& \hphantom{-}\pi^2\left[\theta_3^4(0,q)+\theta_4^4(0,q)\right]\nonumber\\
+  12\omega_1^2e_2 &=& \hphantom{-}\pi^2\left[\theta_2^4(0,q)-\theta_4^4(0,q)\right]\\
+  12\omega_1^2e_3 &=&           - \pi^2\left[\theta_2^4(0,q)+\theta_3^4(0,q)\right]\nonumber
+\end{eqnarray}
+where~$q=e^{-\pi K'/K}$.  From the manpage:
+
+<<abs_e18.10.9,echo=TRUE,print=TRUE>>=
+abs( e18.10.9(parameters(g=g)))
+@
+again showing reasonably accurate numerical results, but perhaps
+more importantly explicitly verifying that the notational scheme used
+is internally consistent.
+
+Although the examples above use the invariants~\code{g2} and \code{g3}
+to define the elliptic function and its periods, sometimes the
+fundamental periods are known and the invariants are desired.  This is
+done by function \code{g.fun()}, which takes the fundamental periods
+as arguments and returns the two invariants~$g_2$ and~$g_3$.  Observe
+that there are many pairs of basic periods that generate the same
+lattice---see figure~\ref{latplot}---but it is usual to specify the
+unique {\em fundamental periods} as this pair usually has desirable
+numerical convergence properties.
+
+\begin{figure}[htbp]
+  \begin{center}
+<<lattice_figure,fig=TRUE>>=
+jj <- parameters(g=c(1+1i,2-3i))$Omega
+latplot(jj,xlim=c(-4,4),ylim=c(-4,4),xlab="Re(z)",
+     ylab="Im(z)")
+polygon(Re(c(jj[1],sum(jj),jj[2],0)),
+        Im(c(jj[1],sum(jj),jj[2],0)),lwd=2,col="gray90",pch=16,cex=3)
+
+kk <- -c(3*jj[1] + 2*jj[2] , jj[1] + jj[2])  #det(matrix(c(3,2,1,1),2,2,T)==1
+
+polygon(Re(c(kk[1],sum(kk),kk[2],0)),
+        Im(c(kk[1],sum(kk),kk[2],0)),lwd=2,col="gray30",pch=16,cex=3)
+@
+\caption{The\label{latplot} lattice generated by~$\wp(z;1+i,2-3i)$;
+  fundamental period parallelogram shown in light gray and a basic
+  period parallelogram shown in darker gray}
+  \end{center}
+\end{figure}
+
+\subsubsection{Unimodularity}\label{section.unimodularity}
+Many functions of the package are {\em unimodular}.  The
+invariants~$g_2$ and~$g_3$ are defined in terms of a pair of basic
+periods~$\omega_1$ and~$\omega_2$.  However, any pair of basic periods
+should have the same invariants, because any pair of basic periods
+will define the same elliptic function (hence the name).  Basic period
+pairs are related by a unimodular transformation:
+if~$\omega_1,\omega_2$ and~$\tilde{\omega}_1,\tilde{\omega}_2$ are two
+pairs of basic periods then there exist integers~$a,b,c,d$
+with~$ad-bc=1$ and
+\[
+\left(
+\begin{array}{cc}
+a&b\\
+c&d
+\end{array}
+\right)
+\left(
+\!\!
+\begin{array}{c}
+\omega_1\\
+\omega_2
+\end{array}
+\!\!
+\right)=
+\left(\!\!
+\begin{array}{c}
+\tilde{\omega}_1\\
+\tilde{\omega}_2
+\end{array}
+\!\!
+\right)
+\]
+
+Formally, a
+unimodular function~$f(\cdot,\cdot)$ is one with arity~2---it is
+conventional to write~$f(\mathbf{v})=f(a,b)$---and for which
+\begin{equation}
+f(\mathbf{v})=f(\mathbf{M}\mathbf{v})\end{equation} where~$\mathbf{M}$
+is unimodular: that is, an integer matrix with a determinant of unity.
+In this context, unimodular matrices (and the transformations they
+define) are interesting because any two pairs of basic periods are
+related by a unimodular transformation.
+
+The package includes
+functions that generate unimodular matrices.  The underlying function
+is \code{congruence()}, which generates~$2\times 2$ integer matrices
+with a determinant of~1, given the first row.  For example:
+<<congruence,echo=TRUE,print=TRUE>>=
+M <- congruence(c(4,9))
+@
+(observe that the determinant of~$\mathbf{M}$ is unity) and thus we
+may verify the unimodularity of, for example, \code{g.fun()} by
+evaluating the invariants for a pair of fundamental periods, and
+comparing this with the invariants calculated for a pair of basic
+periods that are related to the fundamental periods by a unimodular
+transformation (here~$\mathbf{M}$).  In \proglang{R} idiom:
+<<define_o,echo=TRUE,print=FALSE>>=
+o <- c(1,1i)
+<<maxdiff_o,echo=TRUE,print=TRUE>>=
+maxdiff(g.fun(o), g.fun(M %*% o,maxiter=800))
+@
+showing that the invariants for period pair~$o=(1,i)^T$ are almost
+identical to those for period
+pair~$o'=\mathbf{M}o=(4+9i,3+7i)^T$.  Observe that the latter
+evaluation requires many more iterations for accurate numerical
+evaluation: this behaviour is typically encountered when considering
+periods whose ratio is close to the real axis.
+
+In addition, function \code{unimodular()} generates unimodular
+matrices systematically, and function \code{unimodularity()} checks
+for a function's being unimodular.
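+
+For example (a sketch mirroring the package's test suite), the
+unimodularity of \code{g2.fun()} may be checked by applying it to
+every unimodular transformation of a period pair and comparing with
+the untransformed value:
+
+\begin{Schunk}
+\begin{Sinput}
+> o <- c(1,1i)
+> maxdiff(unimodularity(3, o, FUN=g2.fun, maxiter=100), g2.fun(o))
+\end{Sinput}
+\end{Schunk}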
+
+\subsubsection{Contour integration and the residue theorem}
+
+As noted in section~\ref{zeta.analytic}, the zeta function~$\zeta(z)$
+possesses a simple pole of residue~1 at the origin.  The residue
+theorem would imply that
+\[
+\varoint\zeta(z)\,dz=2\pi i
+\]
+when the contour is taken round a path that encircles the origin but
+no other poles.  This may be verified numerically using
+\pkg{elliptic}'s \code{myintegrate} suite of functions, which
+generalize the \pkg{stats} package's \code{integrate()} function to
+the complex plane.  Here, function \code{integrate.contour()} is used
+to integrate round the unit circle.  This function takes three
+arguments: first, the function to be integrated; second, a function
+that describes the contour to be integrated along; and third, a
+function that describes the derivative of the contour.  We may now
+integrate over a closed loop, using arguments~\code{u}
+and~\code{udash} which specify a contour following the unit circle:
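+
+\begin{Schunk}
+\begin{Sinput}
+> u     <- function(x){exp(pi*2i*x)}
+> udash <- function(x){pi*2i*exp(pi*2i*x)}
+\end{Sinput}
+\end{Schunk}
+
+Here~$x$ runs from~0 to~1, so \code{u()} traces the unit circle once
+anticlockwise and \code{udash()} is its derivative; \code{Zeta()}
+below is simply~$\zeta(\cdot,g)$ for the invariants~\code{g} used
+above.  Then: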
+
+<<u_udash,echo=FALSE,print=FALSE>>=
+u <- function(x){exp(pi*2i*x)}
+udash <- function(x){pi*2i*exp(pi*2i*x)}
+Zeta <- function(z){zeta(z,g)}
+Sigma <- function(z){sigma(z,g)}
+WeierstrassP <- function(z){P(z,g)}
+@ 
+
+<<integrate,echo=TRUE,print=FALSE>>=
+jj <- integrate.contour(Zeta,u,udash)
+<<maxdiff_integrate,echo=TRUE,print=TRUE>>=
+maxdiff(jj, 2*pi*1i)
+@
+showing reasonable numerical accuracy.  Compare Weierstrass's~$\wp$
+function, which has a second order pole at the origin:
+<<abs_integrate,echo=TRUE,print=TRUE>>=
+abs(integrate.contour(WeierstrassP,u,udash))
+@
+
+\subsubsection[The PARI system]{The \proglang{PARI} system}
+Perhaps the most convincing evidence for numerical accuracy and
+consistency of notation in the software presented here is provided by
+comparison of the package's results with that of
+\proglang{PARI}~\citep{batut2000}.  The \proglang{PARI} system is an open-source project
+aimed at number theorists, with an emphasis on pure mathematics; it
+includes some elliptic function capability.  Function \code{P.pari()}
+of package \pkg{elliptic} calls the \code{pari} system directly to
+evaluate elliptic functions from within an \proglang{R} session, enabling quick
+verification:
+
+\begin{Schunk}
+\begin{Sinput}
+> omega <- c(1,1i)
+\end{Sinput}
+\end{Schunk}
+\begin{Schunk}
+\begin{Sinput}
+> z <- seq(from=pi,to=pi*1i,len=10)
+\end{Sinput}
+\end{Schunk}
+\begin{Schunk}
+\begin{Sinput}
+> maxdiff(P.pari(z,Omega=omega),  P(z,params=parameters(Omega=omega)))
+\end{Sinput}
+\begin{Soutput}
+[1] 2.760239e-14
+\end{Soutput}
+\end{Schunk}
+
+again showing reasonable agreement, this time between two independent
+computational systems.
+
+\subsection{Visualization of complex functions}
+
+In the following, a Weierstrass elliptic function with invariants
+of~$1+i$ and~$2-3i$ will be considered.  The half periods
+$\omega_1,\omega_2$ are first evaluated:
+
+<<jj_omega,echo=TRUE,print=FALSE>>=
+jj.omega <- half.periods(g=c(1+1i,2-3i))
+@ 
+and these may be visualized by using \code{latplot()}, as in
+figure~\ref{latplot}.  Figure~\ref{P.persp.re} shows the real part of
+such a function, shown over part of the complex plane, and
+figure~\ref{P.view} shows the same function using the \code{view()}
+function.
+
+<<calculate_wp_figure,echo=FALSE,print=FALSE,cache=TRUE>>=
+x <- seq(from=-4, to=4, len=n)
+y <- x
+z <- outer(x,1i*x, "+")
+f <- P(z, c(1+1i,2-3i))
+@ 
+
+
+%% Thanks to Dario Strbenac for the following structure
+<<wp_figure_file>>=   
+png("wp_figure.png",width=800,height=800)
+@ 
+
+<<label=wp_figure_plot>>=
+persp(x, y, limit(Re(f)), xlab="Re(z)",ylab="Im(z)",zlab="Re(P(z))",
+theta=30, phi=30, r=1e9, border=NA, shade=0.8, expand=0.6)
+@ 
+
+<<label=wp_figure_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{wp_figure.png}
+    \caption{Real part of~$\wp(z;1+i,2-3i)$.  Note \label{P.persp.re}
+    the second order poles at each lattice point}
+  \end{center}
+\end{figure}
+
+
+<<thallerfig_file>>=
+png("thallerfig.png",width=800,height=800)
+@ 
+
+<<label=thallerfig_plot>>=
+par(pty="s")
+view(x,y,f,scheme=0,real.contour=FALSE, imag.contour=FALSE,drawlabels=FALSE,col="red",axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos = -4)
+axis(2,pos = -4)
+lines(x=c(-4,4),y=c(4,4))
+lines(y=c(-4,4),x=c(4,4))
+@ 
+
+<<label=thallerfig_close>>=
+null <- dev.off()
+@ 
+
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{thallerfig.png}
+    \caption{Visualization of~$\wp(z;1+i,2-3i)$\label{P.view} using the
+      scheme of \cite{thaller1998}: white corresponds to a pole, black
+      to a zero, and full saturation to~$|\wp(z)|=1$.  The poles
+      of~$\wp(z)$ occur on a regular lattice, and the zeros on two
+      shifted lattices.  Note how each of the poles is surrounded by
+      two cycles of hue, indicating that they are of second order; and
+      each of the zeros is surrounded by one cycle of hue, indicating
+      that they are simple roots}
+  \end{center}
+\end{figure}
+
+The~$\sigma$ function with the same invariants is visualized in
+figure~\ref{sigma.green}, showing that its zeros lie on the same
+lattice as figure~\ref{latplot}.
+
+<<sigma_green_calc,cache=TRUE,echo=FALSE,print=FALSE>>=
+x <- seq(from= -12, to=12, len=n)
+y <- x
+z <- outer(x, 1i*y, "+")
+f <- sigma(z, c(1+1i,2-3i))
+@ 
+
+<<sigma_green_file>>=
+png("sigma_green.png",width=800,height=800)
+@ 
+
+<<sigma_green_plot>>=
+par(pty="s")
+view(x,y,f,scheme=4,real.contour=FALSE,drawlabels=FALSE,axes=FALSE, xlab="Re(z)",ylab="Im(z)")
+axis(1,pos= -12)
+axis(2,pos= -12)
+lines(x=c(-12,12),y=c(12,12))
+lines(y=c(-12,12),x=c(12,12))
+lines(x=c(-12,12),y=-c(12,12))
+lines(y=c(-12,12),x=-c(12,12))
+@ 
+
+
+<<sigma_green_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{sigma_green.png}
+    \caption{Visualization of~$f=\sigma(z;1+i,2-3i)$ using
+    \code{view()}; colour indicates~${\rm Arg}(f)$.  Thus points at
+    which~$f(z)$ is on the negative real axis, that is
+    $\{z:f(z)\in\mathbbm{R}^-\}$, are visible as discontinuities of
+    (colourimetric) value.  These discontinuities are semi-infinite;
+    note that the zeros of~$f$ occur, \label{sigma.green} at the
+    (finite) end of each line, on a regular lattice.  As~$|z|$
+    increases, each discontinuity threads its way through an
+    increasing number of other discontinuities and zeros, and the
+    spacing between the discontinuities becomes less and less}
+  \end{center}
+\end{figure}
+
+Figure~\ref{zeta.thaller} shows the zeta function, and
+figure~\ref{sn.thaller} shows Jacobi's ``sn'' function.
+
+<<calculate_zeta,echo=FALSE,print=FALSE,cache=TRUE>>=
+zeta.z <- zeta(z, c(1+1i,2-3i))
+@ 
+
+<<zetafig_file>>=
+png("zetafig.png",width=800,height=800)
+@ 
+
+<<label=zetafig_plot>>=
+par(pty="s")
+view(x,y,zeta.z,scheme=0,real.contour=FALSE,drawlabels=FALSE,xlab="Re(z)",ylab="Im(z)")
+@ 
+
+<<label=zetafig_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{zetafig.png}
+    \caption{Visualization of~$\zeta(z;1+i,2-3i)$ using \code{view()}
+    and the colouring scheme of Thaller.  Poles appear as white
+    regions, and zeros as black regions. \label{zeta.thaller} Each
+    pole is simple, and each zero is a simple root (one cycle of
+    hue).  The poles occur on a lattice; there is no simple pattern to
+    the zeros.  Note the overall tendency towards whiteness at the
+    edges of the square: $|f|$ increases with~$|z|$ as per
+    equation~\ref{zeta.definition}}
+  \end{center}
+\end{figure}
+
+<<calculate_sn,echo=FALSE,print=FALSE,cache=TRUE>>=
+jj <- seq(from=-40,to=40,len=n)
+m <- outer(jj,1i*jj,"+")
+f <- sn(u=5-2i,m=m,maxiter=1000)
+@ 
+
+<<sn_figure_file>>=
+png("sn_figure.png",width=800,height=800)
+@ 
+
+<<sn_figure_plot>>=
+par(pty="s")
+ view(jj,jj,f,scheme=0,r0=1/5,real=T,imag=F,levels=c(0,-0.4,-1),drawlabels=F,xlab="Re(m)",ylab="Im(m)")
+@ 
+
+<<sn_figure_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{sn_figure.png}
+    \caption{Jacobi's ``sn'' function\label{sn.thaller} using the elliptic
+      package.  Here, $f={\rm sn}(5-2i,m)$ is visualized, with background
+      utilizing Thaller's scheme, and contours of equal~${\rm Re}(f)$ at
+      three selected values shown as black lines.  Note the aperiodic
+      arrangement of poles (white areas) and zeros (black areas)}
+  \end{center}
+\end{figure}
+
+
+\subsection{Potential flow}
+
+One application of complex analysis is to fluid dynamics.  In
+particular, potential flow (steady, two-dimensional, inviscid,
+incompressible) may be studied with the aid of analytic complex
+functions.  Here I show how the elliptic functions discussed in this
+paper may be used to simulate potential flow in a rectangular domain.
+
+Although the tenets of potential flow appear to be absurdly
+idealized\footnote{\cite{feynman1966} famously described potential flow
+as the study of ``dry water''}, it is nevertheless a useful technique
+in many branches of practical fluid mechanics: it is often used to
+calculate a ``theoretical'' flowfield with which measured velocities
+may be compared.  A short sketch of potential theory is given here but
+the reader is referred to~\cite{milne1949} for a full exposition.
+Briefly, we define a {\em complex potential} $w(z)$ to be a complex
+function
+\[
+w(z)=\phi+i\psi\] and observe that both~$\phi$ and~$\psi$ obey
+Laplace's equation if~$w$ is differentiable.  Given this, we may take
+the velocity vector~$\mathbf{v}=(v_x,v_y)$ of the fluid to be
+\[
+v_x=\frac{\partial\phi}{\partial x},\qquad
+v_y=\frac{\partial\phi}{\partial y},\qquad
+\]
+and observe that streamlines are given by contours of equal~$\psi$;
+contours of equal~$\phi$ are called equipotential lines.  The two
+systems of lines cross at right angles (this follows from the
+Cauchy-Riemann conditions).
+
+Consider, for example, the function~$w(z)=z^2$, whose associated flow
+field is shown in figure~\ref{z.squared.pot.flow}.  This corresponds
+to a stagnation point, at which the speed vanishes; the streamlines
+(solid) intersect the equipotential lines (dashed) at right angles.
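+Explicitly, writing~$z=x+iy$ gives~$\phi=x^2-y^2$ and~$\psi=2xy$, so
+that~$\mathbf{v}=(2x,-2y)$: the speed vanishes only at the origin.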
+
+<<stag_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+     f <- function(z){1i*z^2}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+@ 
+
+
+<<stag_point_file>>=
+png("stag_point.png",width=800,height=800)
+@ 
+
+<<stag_point_plot>>=
+par(pty="s")
+view(x,y,f(z),nlevels=14,imag.contour=TRUE,real.cont=TRUE,scheme=-1, 
+     drawlabels=FALSE,
+     axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+d1 <- c(-0.1,0,0.1)
+d2 <- c( 0.1,0,0.1)
+lines(x=d1,y=1+d2)
+lines(x=d1,y=-1-d2)
+lines(x=1-d2,y=d1)
+lines(x=-1+d2,y=d1)
+@ 
+
+<<stag_point_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{stag_point.png}
+    \caption{Potential flow on the complex plane:
+    field\label{z.squared.pot.flow} corresponding to the
+    function~$w(z)=z^2$.  Solid lines represent streamlines and dotted
+    lines represent equipotentials; these intersect at right angles.
+    Note stagnation point at the origin}
+  \end{center}
+\end{figure}
+
+Now consider a slightly more complicated case.  A point source of
+strength~$m$ at~$z_0$ may be represented by the function
+\[m\log(z-z_0)\] (a sink corresponds to~$m<0$).  Any finite number
+of sources or sinks may be combined, as in~$\sum_i m_i\log(z-z_i)$
+where the~$i^{\rm th}$ source is at~$z_i$ and has strength~$m_i$,
+because the system is linear\footnote{It is often more convenient to
+work with the algebraically equivalent form~$\log\left(\prod
+(z-z_i)^{m_i}\right)$, as there are fewer branch cuts to deal with.}.
+Figure~\ref{upper.halfplane.flow} shows two sources and two sinks, all
+of equal strength.  Because the flowfield is symmetric with respect to
+the real axis, there is no flux across it; we may therefore ignore the
+flow in the lower half plane (ie~$\{z:\rm{Im}(z)<0\}$) and consider
+the flow to be bounded below by the real axis.  This is known as {\em
+the method of images}~\citep{milne1949}.
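+
+The potential used to produce figure~\ref{upper.halfplane.flow} is of
+exactly this form, with unit-strength singularities at~$1.7\pm 3i$
+and~$-1\pm 0.6i$:
+
+\begin{Schunk}
+\begin{Sinput}
+> f <- function(z){1i*log((z-1.7+3i)*(z-1.7-3i)/(z+1-0.6i)/(z+1+0.6i))}
+\end{Sinput}
+\end{Schunk}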
+
+
+<<two_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+     f <- function(z){1i*log((z-1.7+3i)*(z-1.7-3i)/(z+1-0.6i)/(z+1+0.6i))}
+     x <- seq(from=-6,to=6,len=n)
+     y <- seq(from=-6,to=6,len=n)
+     z <- outer(x,1i*y,"+")
+@ 
+
+
+<<two_sources_two_sinks_file>>=
+png("two_sources_two_sinks.png",width=800,height=800)
+@ 
+<<label=two_sources_two_sinks_plot>>=
+par(pty="s")
+view(x,y,f(z),nlevels=24,imag.contour=TRUE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="Re(z)",ylab="Im(z)")
+axis(1,pos=-6)
+axis(2,pos=-6)
+lines(x=c(-6,6),y=c(6,6))
+lines(y=c(-6,6),x=c(6,6))
+@ 
+
+<<two_sources_two_sinks_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{two_sources_two_sinks.png}
+    \caption{Potential flow on the complex plane: two sources and
+    two sinks, all of equal strength.  Solid
+    \label{upper.halfplane.flow} lines denote streamlines, dotted
+    lines equipotentials; colour scheme uses the \code{hcl()} system:
+    hue varies with the argument, and chrominance varies with the
+    modulus, of the potential.  There is no flux between the lower and
+    the upper half plane, but there is flux out of, and in to, the
+    diagram.  Note the stagnation point at approximately $5+0i$}
+  \end{center}
+\end{figure}
+
+Now, one may transform a potential flowfield into another form using a
+conformal mapping from the~$z$- plane to the~$\zeta$- plane,
+traditionally denoted
+\[
+\zeta=f(z).
+\]
+
+This technique finds application when flow is desired (in the~$\zeta$-
+plane) that obeys some specific boundary condition that is simple to
+specify in the~$z$- plane. 
+
+%In the present case, we make use of the Schwartz-Christoffel theorem,
+%which states that if~$a,b,c,\ldots$ are~$n$ points on the real axis of
+%the~$\zeta$- plane with~$a<b<c<\ldots$,
+%and~$\alpha,\beta,\gamma,\ldots$ the interior angles of a simple
+%closed polygon of~$n$ vertices, then 
+%\begin{equation}
+%\frac{dz}{d\zeta}=K
+%\left(\zeta-a\right)^{\alpha/\pi-1}
+%\left(\zeta-b\right)^{\beta/\pi-1}
+%\left(\zeta-c\right)^{\gamma/\pi-1}
+%\ldots
+%\end{equation}
+%transforms the real axis of the~$\zeta$- plane into the boundary of a
+%closed polygon in the~$z$- plane with interior
+%angles~$\alpha,\beta,\ldots$.  If the polygon is simple, then the
+%upper half of the~$\zeta$- plane maps to the interior of the polygon.
+%
+%Here the Schwartz Christoffel theorem~\cite{milne1949} is applied to a
+%rectangle, in which~$\alpha=\beta=\gamma=\delta=\pi/2$.  With suitably
+%chosen~$a,b,c,d$ we see that the map from the upper half plane of
+%the~$\zeta$- plane to a rectangle in the~$z$- plane is given by
+
+In this case, we seek a conformal transformation that maps the upper
+half plane to a rectangle.  If we consider the flowfield shown in
+figure~\ref{upper.halfplane.flow}, then the map given by
+\[
+ \zeta=\int\frac{dz}{\sqrt{(1-a^2z^2)(1-z^2)}}
+\]
+takes the upper half plane of the~$z$- plane to a rectangle in
+the~$\zeta$- plane~\citep{milne1949}.  Using
+equation~\ref{sn.definition}, this is equivalent to~$z={\rm
+sn}(\zeta;m)$, where~${\rm sn}(\cdot,\cdot)$ is a Jacobian elliptic
+function and~$m$ a constant of integration.
+
+Figure~\ref{box.flow} shows the resulting flow field: observe how the
+flow speed, which is inversely proportional to the spacing between the
+streamlines, is very small near the left-hand edge of the rectangle.
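+
+In outline (a sketch following the code used to produce the figure),
+the source is placed at the image of a corner of the rectangle, the
+sink at the image of its centre, and the resulting half-plane
+potential is composed with~${\rm sn}$:
+
+\begin{Schunk}
+\begin{Sinput}
+> m <- 0.5
+> K <- K.fun(m)
+> iK <- K.fun(1-m)
+> a <- 1                   # sn() image of a corner of the rectangle
+> b <- sn(1i*iK/2, m=m)    # sn() image of the centre of the rectangle
+> f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+> z <- outer(seq(from=-K,to=K,len=200), 1i*seq(from=0,to=iK,len=200), "+")
+> fsn <- f(sn(z, m=m))
+\end{Sinput}
+\end{Schunk}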
+
+
+<<rect_calc3,echo=FALSE,print=FALSE,cache=TRUE>>=
+m <- 0.5
+K <- K.fun(m)
+iK <- K.fun(1-m)
+
+#b <- sn(1.8 + 0.8i, m=m) # 1.8 to the right and 0.8 up.
+#b <- 0 # middle bottom
+b <- sn(0 + 1i*iK/2,m=m)  #dead centre of the rectangle.
+#b <- -1 # lower left
+#b <- 1/sqrt(m) # top right
+#b <- -1/sqrt(m) # top left
+#b <- 1e9*1i # top centre
+
+
+a <- 1   #bottom right hand side corner
+
+
+f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+
+     x <- seq(from=-K,to=K,len=n)
+     y <- seq(from=0,to=iK,len=n)
+     z <- outer(x,1i*y,"+")
+    fsn <- f(sn(z,m=m))
+@ 
+
+
+<<rectangle_pot_flow_file>>=
+png("rectangle_pot_flow.png",width=800,height=800)
+@ 
+
+<<rectangle_pot_flow_plot>>=
+view(x,y,fsn,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=17,power=0.1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<rectangle_pot_flow_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{rectangle_pot_flow.png}
+    \caption{Potential flow in a rectangle of aspect ratio~2: source
+    and sink of equal \label{box.flow} strength.  Colour scheme as in
+    figure~\ref{upper.halfplane.flow}.  Note the dividing streamline
+    which terminates in a stagnation point on the rectangle boundary}
+  \end{center}
+\end{figure}
+
+\subsection{Bayesian analysis of potential flow}
+
+When considering potential flows, it is often necessary to infer the
+locations of singularities in the flow from sparse and imperfect
+data~\citep{johnson2004}.
+
+Here, I apply the methods of~\cite{kennedy2001}
+and~\cite{kennedy2001a} (hereafter KOH and KOHa respectively) using
+the~\pkg{BACCO} package~\citep{hankin2005} to assess some
+characteristics of potential flow in a rectangle.
+
+Kennedy and O'Hagan considered the following inference problem for a
+set of parameters~$\theta\in{\mathcal R}^q$ that are inputs to a
+computer program.  Given an independent variable~$x\in{\mathcal R}^n$,
+and a set of scalar (``field'') observations~$z=z(x)$, they assume
+
+\begin{equation}
+z(x)=\rho\cdot\eta\left(x,\theta\right)+
+\delta(x)+\epsilon
+\end{equation}
+where~$\rho$ is a constant of proportionality (notionally unity);
+$\eta(\cdot,\cdot)$ a Gaussian process with unknown coefficients;
+$\theta$ the true, but unknown parameter values; $\delta(\cdot)$ a
+model inadequacy term (also a Gaussian process with unknown
+coefficients); and~$\epsilon\sim N(0,\lambda^2)$ uncorrelated normal
+observational errors.
+
+Inferences about~$\eta(\cdot,\cdot)$ are made from point observations
+of the process: Kennedy and O'Hagan call these the ``code
+observations'' on the grounds that their chief motivation was the
+understanding of complex computer codes.
+
+Here, potential flow in a rectangle is considered.  The source is at
+one corner of the rectangle, which is considered to have lower left
+point~$(-1,0)$ and upper right point~$(1,1)$.  The position of the
+sink is unknown.
+
+I now show how the position of the sink may be inferred from a sparse
+and noisy set of observed fluid speeds.  Similar inference problems
+are encountered in practice when considering oceanographic flows such
+as those occurring near deep sea vents, although the geometry is
+generally more complex than considered here.
+
+The independent variable~$\mathbf{x}$ is the two-dimensional position
+within the rectangle, and the field observation~$z(\mathbf{x})$ is the
+fluid speed at that point, plus observational error~$\epsilon$.  The
+parameter set~$\theta$ thus has two degrees of freedom corresponding
+to the $x-$ and $y-$ coordinates of the sink.
+
+Field observations will be obtained numerically, using the
+\pkg{elliptic} package.  The simulated flowfield has a sink at a {\em
+known} position---in this case the geometric centre of the
+rectangle---and Bayesian methods will be used to infer its position
+using only fluid speed data.
+
+In the terminology of KOHa, dataset~\code{y} corresponds to modelled
+fluid speed, obtained from the appropriate simulations carried out
+with the sink placed at different locations within the rectangle.
+Dataset~\code{z} corresponds to field observations, which in this
+case is fluid speed at several points in the rectangle, obtained from
+simulations with the sink at the centre of the rectangle.
+
+<<bacco_flow,echo=FALSE,print=FALSE,cache=TRUE>>=
+# Choose the size of the computational mesh:
+n <- n_BACCO
+
+# Choose the number of code observations for D1:
+n.code.obs <- 30
+
+# And the number of field observations for D2:
+n.field.obs <- 31
+
+# First, set up the D1 design matrix.  Recall that D1 is the set of code
+# observations, which here means the observations of fluid speed when
+# the sink is at a known, specified, position.
+
+set.seed(0)
+
+latin.hypercube <- function (n, d){
+  sapply(seq_len(d), function(...) { (sample(1:n) - 0.5)/n })
+}
+
+
+D1.elliptic <- latin.hypercube(n.code.obs , 4)
+colnames(D1.elliptic) <- c("x","y","x.sink","y.sink")
+D1.elliptic[,c(1,3)] <- (D1.elliptic[,c(1,3)] -0.5)*2
+#D1.elliptic[,c(2,4)] <- D1.elliptic[,c(2,4)] *iK
+
+# now a D2 design matrix.  This is field observations: observations of
+# fluid speed when the sink is at the true, unknown, specified,
+# position.
+D2.elliptic <- latin.hypercube(n.field.obs , 2)
+colnames(D2.elliptic) <- c("x","y")
+D2.elliptic[,1] <- (D2.elliptic[,1] -0.5)*2
+
+
+# Now a function that, given x and y and x.sink and y.sink, returns
+# the log of the fluid speed at x,y:
+
+fluid.speed <- function(x.pos, y.pos, x.sink, y.sink){
+  
+  a <- 1   #bottom right hand side corner
+  b <- sn(x.pos/K + 1i*iK*y.pos,m=m)  #position (x.pos , y.pos)
+  f <- function(z){1i*log((z-a)*(z-Conj(a))/(z-b)/(z-Conj(b)))}
+  
+  x <- seq(from=-K,to=K,len=n)
+  y <- seq(from=0,to=iK,len=n)
+  z <- outer(x,1i*y,"+")
+  potential <- f(sn(z,m=m))
+  
+  get.log.ke <- function(x,y,potential){
+    jj <- Re(potential)
+    jj.x <- cbind(jj[,-1]-jj[,-ncol(jj)],0)
+    jj.y <- rbind(jj[-1,]-jj[-nrow(jj),],0)
+    kinetic.energy <- jj.x^2 + jj.y^2
+    n.x <- round(n * (x-(-1))/2)
+    n.y <- round(n * y)
+    return(log(kinetic.energy[n.x , n.y]+0.01))
+  }
+
+  return(get.log.ke(x.pos,y.pos,potential))
+}
+
+# now fill in code outputs y:
+y.elliptic <- rep(NA,nrow(D1.elliptic))
+for(i in 1:length(y.elliptic)){
+  jj <- D1.elliptic[i,,drop=TRUE]
+  y.elliptic[i] <- fluid.speed(jj[1],jj[2],jj[3],jj[4])
+}
+
+
+# Now do the field observations; here the sink is known to be at the
+# centre of the rectangle:
+
+z.elliptic <- rep(NA,nrow(D2.elliptic))
+for(i in 1:length(z.elliptic)){
+  jj <- D2.elliptic[i,,drop=TRUE]
+  z.elliptic[i] <- fluid.speed(jj[1],jj[2],0,0.5)
+}
+
+# Create design matrix plus observations for didactic purposes:
+D1 <- round(cbind(D1.elliptic,observation=y.elliptic),2)
+D2 <- round(cbind(D2.elliptic,observation=z.elliptic),2)
+
+
+# create a data vector:
+d.elliptic <- c(y.elliptic , z.elliptic)
+
+#now a h1.toy() equivalent:
+h1.elliptic <- function(x){
+  out <- c(1,x[1])
+}
+
+#now a H1.toy() equivalent:
+ H1.elliptic <- 
+function (D1) 
+{
+    if (is.vector(D1)) {
+        D1 <- t(D1)
+    }
+    out <- t(apply(D1, 1, h1.elliptic))
+    colnames(out)[1] <- "h1.const"
+    return(out)
+}
+                      
+h2.elliptic <-
+  function(x){
+    c(1,x[1]) 
+  }
+
+H2.elliptic <-
+  function(D2){
+    if (is.vector(D2)) {
+      D2 <- t(D2)
+    }
+    out <- t(apply(D2, 1, h2.elliptic))
+    colnames(out)[1] <- "h2.const"
+    return(out)
+  }
+
+
+#Now an extractor function:
+extractor.elliptic <- 
+function (D1) 
+{
+    return(list(x.star = D1[, 1:2, drop = FALSE], t.vec = D1[, 
+        3:4, drop = FALSE]))
+}
+
+# Now a whole bunch of stuff to define a phi.fun.elliptic()
+# and, after that, to call it:
+phi.fun.elliptic <- 
+function (rho, lambda, psi1, psi1.apriori, psi2, psi2.apriori, 
+    theta.apriori, power) 
+{
+    "pdm.maker.psi1" <- function(psi1) {
+        jj.omega_x <- diag(psi1[1:2])
+        rownames(jj.omega_x) <- names(psi1[1:2])
+        colnames(jj.omega_x) <- names(psi1[1:2])
+        jj.omega_t <- diag(psi1[3:4])
+        rownames(jj.omega_t) <- names(psi1[3:4])
+        colnames(jj.omega_t) <- names(psi1[3:4])
+        sigma1squared <- psi1[5]
+        return(list(omega_x = jj.omega_x, omega_t = jj.omega_t, 
+            sigma1squared = sigma1squared))
+    }
+    "pdm.maker.psi2" <- function(psi1) {
+        jj.omegastar_x <- diag(psi2[1:2])
+        sigma2squared <- psi2[3]
+        return(list(omegastar_x = jj.omegastar_x, sigma2squared = sigma2squared))
+    }
+    jj.mean <- theta.apriori$mean
+    jj.V_theta <- theta.apriori$sigma
+    jj.discard.psi1 <- pdm.maker.psi1(psi1)
+    jj.omega_t <- jj.discard.psi1$omega_t
+    jj.omega_x <- jj.discard.psi1$omega_x
+    jj.sigma1squared <- jj.discard.psi1$sigma1squared
+    jj.discard.psi2 <- pdm.maker.psi2(psi2)
+    jj.omegastar_x <- jj.discard.psi2$omegastar_x
+    jj.sigma2squared <- jj.discard.psi2$sigma2squared
+    jj.omega_t.upper <- chol(jj.omega_t)
+    jj.omega_t.lower <- t(jj.omega_t.upper)
+    jj.omega_x.upper <- chol(jj.omega_x)
+    jj.omega_x.lower <- t(jj.omega_x.upper)
+    jj.a <- solve(solve(jj.V_theta) + 2 * jj.omega_t, solve(jj.V_theta, 
+        jj.mean))
+    jj.b <- t(2 * solve(solve(jj.V_theta) + 2 * jj.omega_t) %*% 
+        jj.omega_t)
+    jj.c <- jj.sigma1squared/sqrt(det(diag(nrow = nrow(jj.V_theta)) + 
+        2 * jj.V_theta %*% jj.omega_t))
+    jj.A <- solve(jj.V_theta + solve(jj.omega_t)/4)
+    jj.A.upper <- chol(jj.A)
+    jj.A.lower <- t(jj.A.upper)
+    list(rho = rho, lambda = lambda, psi1 = psi1, psi1.apriori = psi1.apriori, 
+        psi2 = psi2, psi2.apriori = psi2.apriori, theta.apriori = theta.apriori, 
+        power = power, omega_x = jj.omega_x, omega_t = jj.omega_t, 
+        omegastar_x = jj.omegastar_x, sigma1squared = jj.sigma1squared, 
+        sigma2squared = jj.sigma2squared, omega_x.upper = jj.omega_x.upper, 
+        omega_x.lower = jj.omega_x.lower, omega_t.upper = jj.omega_t.upper, 
+        omega_t.lower = jj.omega_t.lower, a = jj.a, b = jj.b, 
+        c = jj.c, A = jj.A, A.upper = jj.A.upper, A.lower = jj.A.lower)
+}
+
+# OK, that's the function defined.  Now to create some jj.* variables
+# to call it:
+
+jj.psi1 <- c(rep(1,4),0.3)
+names(jj.psi1)[1:4] <- colnames(D1.elliptic)
+names(jj.psi1)[5] <- "sigma1squared"
+
+jj.mean.psi1 <- rep(1,5)
+names(jj.mean.psi1) <- names(jj.psi1)
+
+jj.sigma.psi1 <- diag(0.1,nrow=5)
+rownames(jj.sigma.psi1) <- names(jj.psi1)
+colnames(jj.sigma.psi1) <- names(jj.psi1)
+
+jj.psi2 <- c(1,1,0.3)
+names(jj.psi2)[1:2] <- colnames(D2.elliptic)
+names(jj.psi2)[3] <- "sigma2squared"
+
+jj.mean.psi2 <- rep(1,4)
+names(jj.mean.psi2) <- c("x.sink", "y.sink","rho","lambda")
+
+jj.sigma.psi2 <- diag(0.1,4)
+rownames(jj.sigma.psi2) <- names(jj.mean.psi2)
+colnames(jj.sigma.psi2) <- names(jj.mean.psi2)
+
+jj.mean.th <- c(1,0.5)
+names(jj.mean.th) <- colnames(D1.elliptic)[3:4]
+
+jj.sigma.th <- diag(rep(1,2))
+rownames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+colnames(jj.sigma.th) <- colnames(D1.elliptic)[3:4]
+
+# Now call phi.fun.elliptic():
+phi.elliptic <-
+  phi.fun.elliptic(
+                   rho=1,
+                   lambda=0.1,
+                   psi1=jj.psi1,
+                   psi2=jj.psi2,
+                   psi1.apriori=list(mean=jj.mean.psi1, sigma=jj.sigma.psi1),
+                   psi2.apriori=list(mean=jj.mean.psi2, sigma=jj.sigma.psi2),
+                   theta.apriori=list(mean=jj.mean.th, sigma=jj.sigma.th),
+                   power=2
+                   )
+
+# Now an E.theta.elliptic():
+E.theta.elliptic <- 
+function (D2 = NULL, H1 = NULL, x1 = NULL, x2 = NULL, phi, give.mean = TRUE) 
+{
+    if (give.mean) {
+        m_theta <- phi$theta.apriori$mean
+        return(H1(D1.fun(D2, t.vec = m_theta)))
+    }
+    else {
+        out <- matrix(0, 2,2)
+        rownames(out) <- c("h1.const","x")
+        colnames(out) <- c("h1.const","x")
+        return(out)
+    }
+}
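+
+# Illustrative (hypothetical) call, relying on D1.fun() from the
+# calibrator package as in the body above: with give.mean=TRUE this
+# evaluates the h1 basis at the field design points augmented by the
+# prior mean of theta, e.g.
+#   E.theta.elliptic(D2=D2.elliptic, H1=H1.elliptic, phi=phi.elliptic)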
+
+# Now an Edash.theta.elliptic().  Because the basis vector is not a
+# function of theta, this is somewhat academic: we can use a function
+# identical to Edash.theta.toy():
+
+Edash.theta.elliptic <-
+function (x, t.vec, k, H1, fast.but.opaque = FALSE, a = NULL, 
+    b = NULL, phi = NULL) 
+{
+    if (fast.but.opaque) {
+        edash.mean <- a + crossprod(b, t.vec[k, ])
+    }
+    else {
+        V_theta <- phi$theta.apriori$sigma
+        m_theta <- phi$theta.apriori$mean
+        omega_t <- phi$omega_t
+        edash.mean <- solve(solve(V_theta) + 2 * omega_t, solve(V_theta, 
+            m_theta) + 2 * crossprod(omega_t, t.vec[k, ]))
+    }
+    jj <- as.vector(edash.mean)
+    names(jj) <- rownames(edash.mean)
+    edash.mean <- jj
+    return(H1(D1.fun(x, edash.mean)))
+}
+
+
+
+# Define a wrapper for equation 8:
+# First, calculate the constant to subtract to ensure that
+# the support has a maximum of about zero: 
+
+maximum.likelihood.support <-
+  p.eqn8.supp(theta=c(0,1/2), D1=D1.elliptic, D2=D2.elliptic,
+              H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic,
+              include.prior=FALSE, lognormally.distributed=FALSE,
+              return.log=TRUE, phi=phi.elliptic)
+
+support <- function(x){
+  p.eqn8.supp(theta=x, D1=D1.elliptic, D2=D2.elliptic,
+              H1=H1.elliptic, H2=H2.elliptic, d=d.elliptic,
+              include.prior=FALSE, lognormally.distributed=FALSE,
+              return.log=TRUE, phi=phi.elliptic) - maximum.likelihood.support
+}
+
+# Define a local function called optim() for aesthetic reasons (i.e. it
+# improves the appearance of the call to optim() below):
+
+optim <- 
+  function(par,fn){
+    stats::optim(par=par,fn=fn,control=list(fnscale = -1))$par
+  }
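+
+# Illustrative usage (the same call appears later in the text): because
+# fnscale = -1 the wrapper maximizes rather than minimizes, so
+#   mle <- optim(c(0, 1/2), support)
+# returns the estimated position of the sink with maximum support.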
+
+@ 
+
+The code evaluation design matrix~\code{D1} is chosen according to a
+random Latin hypercube design, and the observations are calculated using
+the \pkg{elliptic} package:
+
+<<head_D1,echo=TRUE,print=TRUE>>=
+head(D1)
+@ 
+
+So the first line shows a simulation with the sink
+at~(\Sexpr{D1[1,3]},\Sexpr{D1[1,4]}); the log of the fluid speed
+at~(\Sexpr{D1[1,1]}, \Sexpr{D1[1,2]}) is~\Sexpr{D1[1,5]}.  There are a
+total of~\Sexpr{n.code.obs} such observations.  Figure~\ref{code.obs}
+shows this first observation point superimposed on the flow field used
+for that code run.
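+
+As a quick sketch (display only; it assumes the unrounded design matrix
+\code{D1.elliptic} and the function \code{fluid.speed()} defined above), the
+observation in the first row can be regenerated directly, up to the
+rounding applied when forming \code{D1}:
+
+\begin{Schunk}
+\begin{Sinput}
+fluid.speed(D1.elliptic[1,1], D1.elliptic[1,2],
+            D1.elliptic[1,3], D1.elliptic[1,4])
+\end{Sinput}
+\end{Schunk}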
+
+The field observations are similarly determined:
+<<head_D2,echo=TRUE,print=TRUE>>=
+head(D2)
+@ 
+
+showing that the first field observation, at~(\Sexpr{D2[1,1]},
+\Sexpr{D2[1,2]}), is~\Sexpr{D2[1,3]}.  There are a total
+of~\Sexpr{n.field.obs} such observations.  Figure~\ref{field.obs}
+shows these field observation points in the context of the ``true''
+flow field.
+
+<<calc_b_sn,echo=FALSE,print=FALSE,cache=TRUE>>=
+b <- sn(D1[1,3] + 1i*D1[1,4],m=m) #point corresponding to first line of D1
+fsnz2 <- f(sn(z,m=m))
+@ 
+
+<<code_obs_file>>=
+png("code_obs.png",width=800,height=800)
+@ 
+
+<<code_obs_plot>>=
+view(x,y,fsnz2,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D1[1,1],y=D1[1,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<code_obs_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{code_obs.png}
+    \caption{Streamlines\label{code.obs} of the flow corresponding to the
+      first code observation; the observation point is shown as a cross.
+      The sink is at~(\Sexpr{D1[1,3]},\Sexpr{D1[1,4]})}
+  \end{center}
+\end{figure}
+
+<<calc_b2,echo=FALSE,print=FALSE,cache=TRUE>>=
+b <- sn(0 + 1i*iK/2,m=m) 
+fsnzz <- f(sn(z,m=m))
+@ 
+
+
+<<true_flow_file>>=
+png("true_flow.png",width=800,height=800)
+@ 
+
+<<true_flow_plot>>=
+view(x,y,fsnzz,nlevels=44,imag.contour=FALSE,real.cont=TRUE,scheme=-1,drawlabels=FALSE,axes=FALSE,xlab="",ylab="")
+points(x=K*D2[,1],y=D2[,2]*iK,pch=4)
+rect(-K,0,K,iK,lwd=3)
+@ 
+
+<<true_flow_close>>=
+null <- dev.off()
+@ 
+
+\begin{figure}[htbp]
+  \begin{center}
+    \includegraphics{true_flow.png}
+    \caption{Streamlines\label{field.obs}
+      of ``true'' flow; field observation points shown as crosses}
+  \end{center}
+\end{figure}
+
+Kennedy and O'Hagan give, {\em inter alia,} an expression for the
+likelihood of any value of $\theta$ being the true parameter set (in
+this case, the true position of the sink) in terms of the code
+evaluations and field observations.
+
+Here, function \code{support()} calculates the log-likelihood for a
+pair of coordinates of the sink.  This may be evaluated at the centre
+of the rectangle, and again at the top left corner:
+
+
+<<support,echo=TRUE,print=TRUE>>=
+support(c(0,1/2)) #centre of the rectangle
+support(c(-1,1))   #top left corner
+@ 
+
+showing, as expected, that the support is very much larger at the
+centre of the rectangle than at the corner (here the arbitrary additive
+constant is such that the support at \code{c(0,1/2)} is exactly zero).
+It is now possible to identify the position of the sink that
+corresponds to maximum support using numerical optimization
+techniques:
+
+
+<<mle_calc,echo=FALSE,print=FALSE,cache=TRUE>>=
+mle <- optim(c(0,1/2),support)
+@ 
+
+\begin{Schunk}
+\begin{Sinput}
+(mle <- optim(c(0,1/2),support))
+\end{Sinput}
+\end{Schunk}
+
+<<print_mle,echo=FALSE,print=TRUE>>=
+mle
+@ 
+
+Thus the maximum likelihood estimate for the sink is a distance of
+about~0.2 from the true position.  The support at this point is
+about~3.9 units of likelihood:
+
+<<support_of_mle,echo=TRUE,print=TRUE>>=
+support(mle)
+@ 
+
+\subsubsection{Discussion of Bayesian statistical analysis}
+
+The above example shows the ideas of KOH being applied
+straightforwardly, but with the novel twist of $\theta$ being
+interpreted as physical characteristics of a fluid flow.  In this
+case~$\theta$ is the coordinates of the sink.
+
+The MLE is better supported than the true position by about~3.9 units
+of likelihood: thus, in the language of~\cite{edwards1992}, the
+hypothesis of $\theta_\mathrm{true}=(0,0.5)$ would not be rejected if
+one accepted Edwards's 2 units of likelihood per degree of freedom.
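+
+A display-only sketch of this comparison (assuming the objects defined
+above, and taking two degrees of freedom for the two sink coordinates,
+so a criterion of $2\times 2=4$ units): the difference in support falls
+short of the criterion, so the true position is not rejected:
+
+\begin{Schunk}
+\begin{Sinput}
+support(mle) - support(c(0, 1/2)) < 2 * 2
+\end{Sinput}
+\end{Schunk}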
+
+The discrepancy between~$\hat{\theta}$ and~$\theta_\mathrm{true}$ (a
+distance of about 0.2) may be due to the coarseness of the form
+adopted for the basis functions, and better results might be obtained
+by using a more sophisticated system of model inadequacy than the
+simple linear form presented here.
+
+The methods of KOH allow one to make statistically robust statements
+about the physical characteristics of an interesting flow that are
+difficult to make in any other way.
+
+\section{Conclusions}
+
+Elliptic functions are an interesting and instructive branch of
+complex analysis, and are frequently encountered in applied
+mathematics: here they were used to calculate a potential flow field
+in a rectangle.
+
+This paper introduced the \proglang{R} package \pkg{elliptic}, which was then
+used in conjunction with Bayesian statistical methods (the \pkg{BACCO}
+bundle) to make statistically sound inferences about a flow with
+uncertain parameters: in this case the position of the sink was
+estimated from a sparse and noisy dataset.
+
+
+\subsection*{Acknowledgements}
+I would like to acknowledge the many stimulating and helpful comments
+made by the \proglang{R}-help list over the years.
+
+\bibliography{elliptic}
+\end{document}
diff --git a/vignettes/residuetheorem.Rnw b/vignettes/residuetheorem.Rnw
new file mode 100644
index 0000000..273748d
--- /dev/null
+++ b/vignettes/residuetheorem.Rnw
@@ -0,0 +1,162 @@
+\documentclass[nojss]{jss}
+
+\usepackage{dsfont}
+\usepackage{bbm}
+\usepackage{amsfonts}
+\usepackage{wasysym}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%% just as usual
+\author{Robin K. S. Hankin}
+\title{The residue theorem from a numerical perspective}
+%\VignetteIndexEntry{The residue theorem from a numerical perspective}
+%% for pretty printing and a nice hypersummary also set:
+%% \Plainauthor{Achim Zeileis, Second Author} %% comma-separated
+\Plaintitle{The residue theorem from a numerical perspective}
+
+%% an abstract and keywords
+\Abstract{A short vignette illustrating Cauchy's integral theorem
+  using numerical integration.}
+\Keywords{Residue theorem, Cauchy formula, Cauchy's integral formula,
+  contour integration, complex integration, Cauchy's theorem}
+
+
+\Address{
+  Robin K. S. Hankin\\
+  Auckland University of Technology\\
+  2-14 Wakefield Street\\
+  Auckland\\
+  New Zealand\\
+  E-mail: \email{hankin.robin at gmail.com}
+}
+
+
+%% need no \usepackage{Sweave.sty}
+\begin{document}
+
+
+<<requirepackage,echo=FALSE,print=FALSE>>=
+require(elliptic,quietly=TRUE)
+@ 
+
+
+In this very short vignette, I will use contour integration to evaluate
+\begin{equation}
+  \int_{x=-\infty}^{\infty}\frac{e^{ix}}{1+x^2}\,dx
+  \end{equation}
+using numerical methods.  This document is part of the \pkg{elliptic}
+package~\cite{hankin2006}. 
+
+The residue theorem tells us that the integral of~$f(z)$ along any
+positively oriented, closed, nonintersecting path is equal to~$2\pi i$
+times the sum of the residues of~$f(z)$ at the poles inside it.
+
+Take a closed path~$P$ running from $-R$ to $+R$ along the real axis
+and then back along a semicircle of radius~$R$ in the upper half plane.
+Now consider large~$R$.  Then $P$ encloses a pole at~$i$ [there is one
+at $-i$ also, but this lies outside~$P$ and so is irrelevant here] at
+which the residue is~$-i/2e$.  Thus
+
+\begin{equation}
+  \oint_P f(z)\,dz=2\pi i\cdot(-i/2e) = \pi/e
+\end{equation}
+  
+along~$P$; the contribution from the semicircle tends to zero
+as~$R\longrightarrow\infty$; thus the integral along the real axis is
+the whole path integral, or~$\pi/e$.
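+
+(The residue quoted above is a standard computation, included here for
+completeness: since $1+z^2=(z-i)(z+i)$,
+\[
+\mathrm{Res}_{z=i}\,\frac{e^{iz}}{1+z^2}
+=\left.\frac{e^{iz}}{z+i}\right|_{z=i}
+=\frac{e^{-1}}{2i}=-\frac{i}{2e},
+\]
+so that $2\pi i\cdot\left(-i/2e\right)=\pi/e$ as claimed.)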
+
+We can now reproduce this result numerically.  First, choose $R$:
+
+<<chooseR>>=
+R <- 400
+@ 
+
+
+And now define a path~$P$.  First, the semicircle:
+
+<<definesemi>>=
+u1     <- function(x){R*exp(pi*1i*x)}
+u1dash <- function(x){R*pi*1i*exp(pi*1i*x)}
+@ 
+
+and now the straight part along the real axis:
+
+<<straightpart>>=
+u2     <- function(x){R*(2*x-1)}
+u2dash <- function(x){R*2}
+@ 
+
+And now define the integrand:
+
+<<>>=
+f <- function(z){exp(1i*z)/(1+z^2)}
+@ 
+
+Now carry out the path integral.  I'll do it explicitly, but note that
+the contribution from the first integral (the semicircular arc) should
+be small:
+
+
+<<ansapp>>=
+answer.approximate <-
+    integrate.contour(f,u1,u1dash) +
+    integrate.contour(f,u2,u2dash) 
+@ 
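+
+As a quick display-only check of the remark above, the contribution of
+the semicircular arc could be inspected on its own (a sketch using the
+same \code{integrate.contour()} call):
+
+\begin{Schunk}
+\begin{Sinput}
+abs(integrate.contour(f, u1, u1dash))
+\end{Sinput}
+\end{Schunk}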
+
+
+And compare with the analytical value:
+
+<<compareans>>=
+answer.exact <- pi/exp(1)
+abs(answer.approximate - answer.exact)
+@ 
+
+Now try the same thing but integrating over a triangle instead of a
+semicircle, using {\tt integrate.segments()}.  Use a path $P'$ with
+base from $-R$ to $+R$ along the real axis, closed by two straight
+segments, one from $+R$ to $iR$, the other from $iR$ to $-R$:
+
+ 
+<<>>=
+abs(integrate.segments(f,c(-R,R,1i*R))- answer.exact)
+@  
+ 
+
+Observe how much better one can do by integrating over a big square
+instead:
+
+
+<<useabigsquare>>=
+abs(integrate.segments(f,c(-R,R,R+1i*R, -R+1i*R))- answer.exact)
+@ 
+
+
+\subsection{Residue theorem}
+
+
+Function \code{residue()} is a wrapper that takes a function~$f(z)$
+and integrates~$f(z)/\left(z-z_0\right)$ around a closed loop~$C$ which
+encloses~$z_0$.  If $f(\cdot)$ is holomorphic within~$C$, Cauchy's
+integral formula states that
+\begin{equation}
+  \frac{1}{2\pi i}\oint_C\frac{f(z)}{z-z_0}\,dz = f(z_0)
+\end{equation}
+
+
+
+and we can test this numerically:
+
+<<residuetest>>=
+f <- function(z){sin(z)}
+numerical <- residue(f,z0=1,r=1)
+exact <- sin(1)
+abs(numerical-exact)
+@ 
+
+which is unreasonably accurate, in my opinion.
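+
+The same call pattern can serve as a further sanity check with other
+integrands; for example (a display-only sketch, using the same
+\code{residue()} arguments as above), taking $f(z)=e^z$ and $z_0=0$
+should give $e^0=1$:
+
+\begin{Schunk}
+\begin{Sinput}
+abs(residue(function(z){exp(z)}, z0=0, r=1) - 1)
+\end{Sinput}
+\end{Schunk}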
+
+\bibliography{elliptic}
+\end{document}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-elliptic.git


